| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 to 53.2k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""Tokenization classes for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by prepending the language prefix tokens and appending eos."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # the SentencePiece processor cannot be pickled, so drop it from the state
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
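# A small usage sketch for the language-prefix logic above. The multilingual branch
# only activates when `lang_codes` is given, so the "mustc" codes are assumed here
# purely for illustration (an ASR checkpoint would take the `else` branch instead):
#
#   tok = Speech2TextTokenizer(vocab_file="vocab.json", spm_file="sentencepiece.bpe.model",
#                              lang_codes="mustc", tgt_lang="fr")
#   tok.prefix_tokens       # [id of "<lang:fr>"]; build_inputs_with_special_tokens
#                           # prepends it and appends eos
#   tok.tgt_lang = "de"     # the setter re-runs set_tgt_lang_special_tokens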
| 679
|
"""Tokenization classes for LED."""
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" LED tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
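# A minimal usage sketch for the `_pad` override above. The checkpoint name comes
# from the pretrained map at the top of the file; the "global attention on <s>"
# convention is the usual LED recipe and is assumed here for illustration:
#
#   import torch
#   from transformers import LEDTokenizerFast
#
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["a long document"], return_tensors="pt")
#   global_attention_mask = torch.zeros_like(enc["input_ids"])
#   global_attention_mask[:, 0] = 1  # 1 = global attention, 0 = local attention
#   enc["global_attention_mask"] = global_attention_mask
#   # if `pad` later lengthens the batch, `_pad` extends this mask with -1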
| 679
| 1
|
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
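# Integration-test takeaway: the head predicts `masks_queries_logits` at 1/4 of the
# input resolution and `class_queries_logits` over num_labels + 1 classes (the extra
# "null" class marks queries matched to no object). A hedged post-processing sketch,
# assuming the standard image-processor API:
#
#   result = image_processor.post_process_instance_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]
#   result["segmentation"]  # (H, W) map of instance ids, upsampled back to the image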
| 515
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
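# Descriptive note on the LBP loop above (the exact neighbor ordering is defined in
# digital_image_processing.filters.local_binary_pattern): the classic local binary
# pattern assigns each pixel an 8-bit code by thresholding its 8 neighbors against
# the center value,
#
#   bit_k = 1 if neighbor_k >= center else 0
#   lbp_value = sum(bit_k * 2**k for k in range(8))
#
# so `lbp_image` ends up holding integer codes in [0, 255], one per pixel.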
| 515
| 1
|
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Find the Maclaurin approximation of sin(theta)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Find the Maclaurin approximation of cos(theta)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))

    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
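# Both functions above compute truncated Maclaurin expansions after first reducing
# theta modulo 2*pi. Written out, the sums being evaluated are
#
#   sin(theta) = sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r+1) / (2r+1)!
#   cos(theta) = sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r)   / (2r)!
#
# e.g. maclaurin_sin(1.0) ~= 0.8414709848, matching math.sin(1.0) to double
# precision: once theta is reduced into [0, 2*pi), the factorial in the
# denominator makes the series converge after only a handful of terms.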
| 105
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]


if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
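# The pattern above makes `import transformers.models.whisper` cheap: `_LazyModule`
# replaces the module object in `sys.modules`, and each submodule listed in
# `_import_structure` is only imported on first attribute access. An illustrative
# sketch of the effect (the deferral logic itself lives in transformers.utils):
#
#   from transformers.models import whisper   # no torch/TF/flax code runs yet
#   config = whisper.WhisperConfig()          # configuration_whisper imports here,
#                                             # the modeling files stay untouched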
| 105
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_features", "is_longer"]
def __init__( self : Any , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=48000 , UpperCamelCase_ : Optional[int]=480 , UpperCamelCase_ : Optional[int]=10 , UpperCamelCase_ : Union[str, Any]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Union[str, Any] = top_db
__UpperCAmelCase : str = truncation
__UpperCAmelCase : str = padding
__UpperCAmelCase : Union[str, Any] = fft_window_size
__UpperCAmelCase : str = (fft_window_size >> 1) + 1
__UpperCAmelCase : Optional[int] = hop_length
__UpperCAmelCase : int = max_length_s
__UpperCAmelCase : List[Any] = max_length_s * sampling_rate
__UpperCAmelCase : Optional[Any] = sampling_rate
__UpperCAmelCase : Tuple = frequency_min
__UpperCAmelCase : str = frequency_max
__UpperCAmelCase : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , )
__UpperCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__)
__UpperCAmelCase : str = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def a_ ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None):
"""simple docstring"""
__UpperCAmelCase : Any = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , )
return log_mel_spectrogram.T
def a_ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : str = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Any = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : int = [0]
# randomly choose index for each part
__UpperCAmelCase : Union[str, Any] = np.random.choice(ranges[0])
__UpperCAmelCase : List[str] = np.random.choice(ranges[1])
__UpperCAmelCase : Any = np.random.choice(ranges[2])
__UpperCAmelCase : Any = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : Union[str, Any] = torch.tensor(mel[None, None, :])
__UpperCAmelCase : List[Any] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_)
__UpperCAmelCase : Dict = mel_shrink[0][0].numpy()
__UpperCAmelCase : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def a_ ( self : str , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : Optional[Any] = len(UpperCamelCase_) - max_length
__UpperCAmelCase : Optional[int] = np.random.randint(0 , overflow + 1)
__UpperCAmelCase : str = waveform[idx : idx + max_length]
__UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
__UpperCAmelCase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCAmelCase : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : List[Any] = np.stack([mel, mel, mel, mel] , axis=0)
__UpperCAmelCase : str = False
else:
__UpperCAmelCase : List[Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : str = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
__UpperCAmelCase : int = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : Any = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : Optional[int] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : Any = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : Any = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : Optional[Any] = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0)
if truncation == "fusion":
__UpperCAmelCase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
__UpperCAmelCase : Optional[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self : int , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : str , ):
"""simple docstring"""
__UpperCAmelCase : Any = truncation if truncation is not None else self.truncation
__UpperCAmelCase : Optional[int] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
__UpperCAmelCase : Any = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__UpperCAmelCase : Optional[Any] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__UpperCAmelCase : Union[str, Any] = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray):
__UpperCAmelCase : Optional[Any] = np.asarray(UpperCamelCase_ , dtype=np.floataa)
elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__UpperCAmelCase : List[Any] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__UpperCAmelCase : int = [np.asarray(UpperCamelCase_)]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : Any = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_)
for waveform in raw_speech
]
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_)
is_longer.append(UpperCamelCase_)
if truncation == "fusion" and sum(UpperCamelCase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCAmelCase : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase_))
__UpperCAmelCase : int = True
if isinstance(input_mel[0] , UpperCamelCase_):
__UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
__UpperCAmelCase : Dict = [[longer] for longer in is_longer]
__UpperCAmelCase : Dict = {"input_features": input_mel, "is_longer": is_longer}
__UpperCAmelCase : List[str] = BatchFeature(UpperCamelCase_)
if return_tensors is not None:
__UpperCAmelCase : Optional[Any] = input_features.convert_to_tensors(UpperCamelCase_)
return input_features
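# A short usage sketch; the shapes assume the defaults above (48 kHz audio, 10 s max
# length, 64 mel bins, hop 480, truncation="fusion") and are stated as expectations,
# not guarantees:
#
#   import numpy as np
#   extractor = ClapFeatureExtractor()
#   audio = np.zeros(5 * 48000)  # 5 s of silent mono audio
#   features = extractor(audio, sampling_rate=48000, return_tensors="pt")
#   features["input_features"].shape  # (1, 4, 1001, 64): four stacked mel views
#   # note: with "fusion", one batch entry is randomly flagged `is_longer` even
#   # when nothing exceeds 10 s (see the end of `__call__` above)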
| 487
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class a__ ( __magic_name__ ):
lowercase_ = ["image_processor", "feature_extractor"]
lowercase_ = "TvltImageProcessor"
lowercase_ = "TvltFeatureExtractor"
def __init__( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict):
"""simple docstring"""
super().__init__(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = image_processor
__UpperCAmelCase : Dict = feature_extractor
def __call__( self : Tuple , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[int]=False , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : str , ):
"""simple docstring"""
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process.")
__UpperCAmelCase : Optional[Any] = None
if images is not None:
__UpperCAmelCase : int = self.image_processor(UpperCamelCase_ , mask_pixel=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
if images_mixed is not None:
__UpperCAmelCase : int = self.image_processor(UpperCamelCase_ , is_mixed=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
if audio is not None:
__UpperCAmelCase : List[Any] = self.feature_extractor(
UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , mask_audio=UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : List[str] = {}
if audio is not None:
output_dict.update(UpperCamelCase_)
if images is not None:
output_dict.update(UpperCamelCase_)
if images_mixed_dict is not None:
output_dict.update(UpperCamelCase_)
return output_dict
@property
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.image_processor.model_input_names
__UpperCAmelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 487
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase_ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
def __lowercase ( ) -> Dict:
'''simple docstring'''
_A = argparse.ArgumentParser()
parser.add_argument("-f" )
_A = parser.parse_args()
return args.f
def __lowercase ( __lowercase , __lowercase="eval" ) -> Optional[Any]:
'''simple docstring'''
_A = os.path.join(__lowercase , F'''{split}_results.json''' )
if os.path.exists(__lowercase ):
with open(__lowercase , "r" ) as f:
return json.load(__lowercase )
raise ValueError(F'''can\'t find {path}''' )
lowerCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.get_auto_remove_tmp_dir()
_A = f'''\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '''.split()
with patch.object(__UpperCAmelCase , "argv" , __UpperCAmelCase ):
run_flax_glue.main()
_A = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to converge
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
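# Minimal sketch of what these tests exercise from the command line (paths and
# hyperparameters below are illustrative, not fixtures shipped with this file):
#
#   python run_mlm_flax.py \
#       --model_name_or_path distilroberta-base \
#       --train_file data/train.txt --validation_file data/valid.txt \
#       --do_train --do_eval \
#       --output_dir /tmp/mlm-out --overwrite_output_dir
#
# Each script writes its metrics to a JSON results file in --output_dir, which
# the `get_results` helper used above reads back for the assertions.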
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _lowerCamelCase( a , a = "cpu" , a = None ):
__a = torch.load(a , map_location=a )
for k, v in tqdm(state_dict.items() ):
if not isinstance(a , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
__a = v.half()
if save_path is None: # overwrite src_path
__a = src_path
torch.save(a , a )
if __name__ == "__main__":
fire.Fire(convert)
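# Example invocation via python-fire (the script and checkpoint names are
# placeholders, not files shipped alongside this snippet):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place, as handled above.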
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_4410)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_4410)
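# The pattern these tests rely on, outside a test harness (a minimal sketch;
# "bert-base-uncased" is only an example checkpoint):
#
#   from transformers import AutoModel, TFAutoModel
#   pt_model = AutoModel.from_pretrained("bert-base-uncased")                  # native PyTorch weights
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # convert PT -> TF
#
# `from_pt=True` / `from_tf=True` tell the loader to read a checkpoint saved by
# the other framework and convert the weights on the fly.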
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
        'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AlbertForMaskedLM',
        'AlbertForMultipleChoice',
        'AlbertForPreTraining',
        'AlbertForQuestionAnswering',
        'AlbertForSequenceClassification',
        'AlbertForTokenClassification',
        'AlbertModel',
        'AlbertPreTrainedModel',
        'load_tf_weights_in_albert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
        'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAlbertForMaskedLM',
        'TFAlbertForMultipleChoice',
        'TFAlbertForPreTraining',
        'TFAlbertForQuestionAnswering',
        'TFAlbertForSequenceClassification',
        'TFAlbertForTokenClassification',
        'TFAlbertMainLayer',
        'TFAlbertModel',
        'TFAlbertPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
        'FlaxAlbertForMaskedLM',
        'FlaxAlbertForMultipleChoice',
        'FlaxAlbertForPreTraining',
        'FlaxAlbertForQuestionAnswering',
        'FlaxAlbertForSequenceClassification',
        'FlaxAlbertForTokenClassification',
        'FlaxAlbertModel',
        'FlaxAlbertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
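# How the lazy module behaves in practice (a sketch): nothing under the albert
# subpackage is imported at package-import time; an attribute access such as
#
#   from transformers.models.albert import AlbertModel  # triggers the torch-backed import above
#
# resolves through _LazyModule, which imports `modeling_albert` only at that
# point and raises a helpful error if the optional backend (torch/tf/flax) is
# not installed.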
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxRuntimeGatedTest:
    """Empty placeholder class; it only exercises the `require_onnxruntime` gate."""

    pass
"""Project Euler problem 71: find the numerator of the fraction immediately to the
left of numerator/denominator among fractions with denominators up to `limit`."""


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the largest fraction < numerator/denominator whose
    denominator is at most `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # the floor hits numerator/denominator exactly; step one below it
            current_numerator -= 1
        # cross-multiplied comparison: current fraction > best fraction so far
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
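# Worked example (small limit so it can be checked by hand): among fractions with
# denominator <= 8, the largest one below 3/7 is 2/5, so solution(3, 7, 8) == 2.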
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component a node belongs to (recursively)."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Flattens the component mapping so every node points at its root."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the smaller component into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every edge crossing two components, remember the lightest edge
            # incident to each of the two components.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's lightest outgoing edge to the MST and merge.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder kept for the doctest runner below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
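# Minimal usage sketch (the edge list is illustrative):
#
#   g = Graph(4)
#   for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#       g.add_edge(u, v, w)
#   g.boruvka()   # prints each added edge, then the total MST weight (19 here)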
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
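# Typical entry point for this builder (a sketch; the file names are placeholders):
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files={"train": "train.jsonl"})      # one object per line
#   ds = load_dataset("json", data_files="dump.json", field="records")  # nested list under a key
#
# The `field` argument routes through JsonConfig.field and selects the first
# branch of `_generate_tables` above.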
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return a Markdown list of doc links for the models supporting `task_guide`."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check that the generated model list in `task_guide` is up to date; rewrite it if `overwrite` is set."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
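# Run from the repository root, as the comment at the top of the file notes:
#
#   python utils/check_task_guides.py                      # report stale model lists
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the generated tips in place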
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :List[str] = list(_A )
snake_case_ :Any = list(_A )
snake_case_ :Optional[Any] = 0
for i in range(len(_A ) ):
if lista[i] != lista[i]:
count += 1
snake_case_ :Optional[int] = "_"
if count > 1:
return False
else:
return "".join(_A )
def A ( _A ):
"""simple docstring"""
snake_case_ :Tuple = []
while True:
snake_case_ :int = ["$"] * len(_A )
snake_case_ :Union[str, Any] = []
for i in range(len(_A ) ):
for j in range(i + 1, len(_A ) ):
snake_case_ :Dict = compare_string(binary[i], binary[j] )
if k is False:
snake_case_ :Tuple = "*"
snake_case_ :List[str] = "*"
temp.append("X" )
for i in range(len(_A ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_A ) == 0:
return pi
snake_case_ :Dict = list(set(_A ) )
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :Optional[int] = []
for minterm in minterms:
snake_case_ :Tuple = ""
for _ in range(_A ):
snake_case_ :Optional[int] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_A )
return temp
def A ( _A, _A, _A ):
"""simple docstring"""
snake_case_ :Tuple = list(_A )
snake_case_ :List[str] = list(_A )
snake_case_ :Dict = 0
for i in range(len(_A ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :List[Any] = []
snake_case_ :List[Any] = [0] * len(_A )
for i in range(len(chart[0] ) ):
snake_case_ :List[Any] = 0
snake_case_ :Optional[Any] = -1
for j in range(len(_A ) ):
if chart[j][i] == 1:
count += 1
snake_case_ :Dict = j
if count == 1:
snake_case_ :str = 1
for i in range(len(_A ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_A ) ):
snake_case_ :str = 0
temp.append(prime_implicants[i] )
while True:
snake_case_ :Any = 0
snake_case_ :Optional[int] = -1
snake_case_ :List[Any] = 0
for i in range(len(_A ) ):
snake_case_ :str = chart[i].count(1 )
if count_n > max_n:
snake_case_ :Optional[Any] = count_n
snake_case_ :List[str] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_A ) ):
snake_case_ :Any = 0
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :Optional[Any] = [[0 for x in range(len(_A ) )] for x in range(len(_A ) )]
for i in range(len(_A ) ):
snake_case_ :Dict = prime_implicants[i].count("_" )
for j in range(len(_A ) ):
if is_for_table(prime_implicants[i], binary[j], _A ):
snake_case_ :Optional[int] = 1
return chart
def A ( ):
"""simple docstring"""
snake_case_ :str = int(input("Enter the no. of variables\n" ) )
snake_case_ :Dict = [
float(_A )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
snake_case_ :Tuple = decimal_to_binary(_A, _A )
snake_case_ :Tuple = check(_A )
print("Prime Implicants are:" )
print(_A )
snake_case_ :List[Any] = prime_implicant_chart(_A, _A )
snake_case_ :int = selection(_A, _A )
print("Essential Prime Implicants are:" )
print(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
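# The same pipeline as main(), without the interactive prompts (the minterm
# values are illustrative only):
#
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)
#   print(essential)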
def naive_cut_rod_recursive(n: int, prices: list) -> float:
    """Exhaustive recursion: try every first-cut length i and recurse on the rest."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> float:
    """Memoized (top-down dynamic programming) variant of the naive recursion."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> float:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> float:
    """Iterative (bottom-up dynamic programming) variant."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main() -> None:
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
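# Quick sanity check on a hand-computable instance: with prices [1, 5, 8, 9]
# (price of pieces of length 1..4), the best cut of a rod of length 4 is two
# pieces of length 2, so all three implementations return 10:
#
#   assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10
#
# The naive recursion is exponential in n; both DP variants run in O(n^2).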
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling points in [-1, 1]^2 and counting the proportion
    that lands inside the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo integration: average the function at uniform samples and
    scale by the width of the interval."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
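# Example run (results vary with the random seed; with 100_000 samples the pi
# estimate is typically within ~0.01 of math.pi):
#
#   pi_estimator(100_000)
#   area_under_line_estimator_check(100_000)
#   pi_estimator_using_area_under_curve(100_000)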
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 3_0_0_0_1)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0_0_0_0)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = SCREAMING_SNAKE_CASE__  # alias for the verbatim expected-output literal kept above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
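# Typical use of the tokenizer under test, outside the test harness (a sketch;
# the checkpoint is the public DeBERTa-v2 model the integration test pins):
#
#   from transformers import DebertaV2Tokenizer
#   tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   enc = tok("I was born in 92000, and this is falsé.")
#   print(enc["input_ids"], enc["attention_mask"])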
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=1_5):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=3_0, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=6_0, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 3_0)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 3_3)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 6_0)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 5_2)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this."
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please."
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase, _UpperCAmelCase )
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
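# Hedged usage sketch (added for illustration, not part of the original test
# suite): GPT-2 ships without a pad token, which is why the padding tests
# above assign one before padding. Assumes network access to the public
# "gpt2" checkpoint; all names below are illustrative.
def _padding_demo() -> None:
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    tok.pad_token = tok.eos_token  # reuse EOS as the padding token
    batch = tok(
        ["This is a simple input", "This is a simple input looooooooong"],
        padding="longest",
        return_tensors="np",
    )
    # the shorter row is right-padded with tok.pad_token_id and its
    # attention_mask is zero over the padded positions
    print(batch["input_ids"].shape, batch["attention_mask"][0])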
| 663
| 1
|
def counting_sort(collection: list) -> list:
    """Sort a collection of integers with counting sort (stable)."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string: str) -> str:
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
UpperCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase_ = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
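    # Hedged demo (illustration only): the offset by the collection minimum
    # makes negative inputs work, and the reversed placement pass keeps the
    # sort stable for equal keys.
    print(counting_sort([4, -2, 7, -2, 0]))  # [-2, -2, 0, 4, 7]
    print(counting_sort_string("bca"))  # abc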
| 701
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] ="bert"
def __init__( self :Any , _lowercase :Optional[int]=30522 , _lowercase :str=768 , _lowercase :Union[str, Any]=12 , _lowercase :Dict=12 , _lowercase :Optional[Any]=3072 , _lowercase :List[Any]="gelu" , _lowercase :Dict=0.1 , _lowercase :Union[str, Any]=0.1 , _lowercase :Optional[int]=512 , _lowercase :List[str]=2 , _lowercase :List[str]=0.02 , _lowercase :Union[str, Any]=1E-1_2 , _lowercase :Dict=0 , _lowercase :List[str]="absolute" , _lowercase :Union[str, Any]=True , _lowercase :str=None , **_lowercase :Union[str, Any] , ) -> Dict:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a_ ( _snake_case ):
@property
def __a ( self :str) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
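# Hedged usage sketch (illustration only, assuming the two classes above
# mirror transformers' BertConfig and BertOnnxConfig): the ONNX config maps
# each input name to its dynamic axes, so exports accept variable batch and
# sequence sizes.
def _onnx_config_demo() -> None:
    from transformers import BertConfig
    from transformers.models.bert import BertOnnxConfig

    config = BertConfig()
    onnx_config = BertOnnxConfig(config, task="sequence-classification")
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])
    print(onnx_config.inputs)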
| 561
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = "Hello, World!"
lowerCAmelCase_ : Tuple = "en_XX"
def _lowerCamelCase ( lowercase : str , lowercase : str , lowercase : bool ) -> Optional[int]:
_a = Path("data_bin" )
_a = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(A__ ).parent ) , checkpoint_file=Path(A__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(A__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(A__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(A__ )
_a = xmod.model.encoder.sentence_encoder
_a = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_a = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("Our X-MOD config:" , A__ )
_a = XmodForSequenceClassification(A__ ) if classification_head else XmodForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
_a = xmod_sent_encoder.embed_tokens.weight
_a = xmod_sent_encoder.embed_positions.weight
_a = torch.zeros_like(
    model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out because X-MOD does not use them
_a = xmod_sent_encoder.layernorm_embedding.weight
_a = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_a = model.roberta.encoder.layer[i]
_a = xmod_sent_encoder.layers[i]
# self attention
_a = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
_a = xmod_layer.self_attn.q_proj.weight
_a = xmod_layer.self_attn.q_proj.bias
_a = xmod_layer.self_attn.k_proj.weight
_a = xmod_layer.self_attn.k_proj.bias
_a = xmod_layer.self_attn.v_proj.weight
_a = xmod_layer.self_attn.v_proj.bias
# self-attention output
_a = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
_a = xmod_layer.self_attn.out_proj.weight
_a = xmod_layer.self_attn.out_proj.bias
_a = xmod_layer.self_attn_layer_norm.weight
_a = xmod_layer.self_attn_layer_norm.bias
# intermediate
_a = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
_a = xmod_layer.fca.weight
_a = xmod_layer.fca.bias
# output
_a = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
_a = xmod_layer.fca.weight
_a = xmod_layer.fca.bias
_a = xmod_layer.final_layer_norm.weight
_a = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_a = xmod_layer.adapter_layer_norm.weight
_a = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_a = bert_output.adapter_modules[lang_code]
_a = xmod_layer.adapter_modules[lang_code]
_a = from_adapter.fca.weight
_a = from_adapter.fca.bias
_a = from_adapter.fca.weight
_a = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_a = xmod_sent_encoder.layer_norm.weight
_a = xmod_sent_encoder.layer_norm.bias
if classification_head:
_a = xmod.model.classification_heads["""mnli"""].dense.weight
_a = xmod.model.classification_heads["""mnli"""].dense.bias
_a = xmod.model.classification_heads["""mnli"""].out_proj.weight
_a = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
_a = xmod.model.encoder.lm_head.dense.weight
_a = xmod.model.encoder.lm_head.dense.bias
_a = xmod.model.encoder.lm_head.layer_norm.weight
_a = xmod.model.encoder.lm_head.layer_norm.bias
_a = xmod.model.encoder.lm_head.weight
_a = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_a = xmod.encode(A__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(A__ )
_a = model(A__ )[0]
if classification_head:
_a = xmod.model.classification_heads["""mnli"""](xmod.extract_features(A__ ) )
else:
_a = xmod.model(A__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_a = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
_a = torch.allclose(A__ , A__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
lowerCAmelCase_ : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
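# Hedged sketch (illustration only) of the verification pattern the script
# relies on: after copying weights from one module into another, run the same
# input through both and bound the maximum elementwise difference. The toy
# modules below stand in for the fairseq and transformers models.
import torch as _torch
from torch import nn as _nn

_src = _nn.Linear(4, 4)
_dst = _nn.Linear(4, 4)
with _torch.no_grad():
    _dst.weight.copy_(_src.weight)
    _dst.bias.copy_(_src.bias)
_x = _torch.randn(1, 4)
_max_absolute_diff = _torch.max(_torch.abs(_src(_x) - _dst(_x))).item()
print(f"max_absolute_diff = {_max_absolute_diff}")  # 0.0 for an exact copy
assert _torch.allclose(_src(_x), _dst(_x), atol=1e-3)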
| 692
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A : str = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
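# Hedged sketch (illustration only) of the lazy-import pattern above, reduced
# to plain PEP 562: a module-level __getattr__ resolves a name from the import
# structure only when it is first accessed, so importing the package stays
# cheap. In real code this would live in its own module; the names here are
# illustrative.
import importlib

_demo_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):  # called only for names not already in the module
    for _module_name, _exported in _demo_import_structure.items():
        if name in _exported:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")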
| 275
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : List[Any] = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''wav2vec2'''
def __init__( self : Any , A_ : str=32 , A_ : Dict=768 , A_ : int=12 , A_ : Tuple=12 , A_ : Optional[Any]=3_072 , A_ : List[Any]="gelu" , A_ : int=0.1 , A_ : Any=0.1 , A_ : Optional[int]=0.1 , A_ : Optional[Any]=0.0 , A_ : Union[str, Any]=0.0 , A_ : List[Any]=0.1 , A_ : List[Any]=0.1 , A_ : Union[str, Any]=0.02 , A_ : List[str]=1E-5 , A_ : Optional[int]="group" , A_ : str="gelu" , A_ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , A_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , A_ : int=(10, 3, 3, 3, 3, 2, 2) , A_ : str=False , A_ : Union[str, Any]=128 , A_ : str=16 , A_ : str=False , A_ : str=True , A_ : List[Any]=0.05 , A_ : List[Any]=10 , A_ : Any=2 , A_ : Optional[int]=0.0 , A_ : Dict=10 , A_ : Optional[int]=0 , A_ : int=320 , A_ : Optional[int]=2 , A_ : Tuple=0.1 , A_ : Optional[Any]=100 , A_ : Tuple=256 , A_ : Dict=256 , A_ : Union[str, Any]=0.1 , A_ : Tuple="sum" , A_ : Any=False , A_ : List[str]=False , A_ : Union[str, Any]=256 , A_ : Optional[Any]=(512, 512, 512, 512, 1_500) , A_ : List[Any]=(5, 3, 3, 1, 1) , A_ : Dict=(1, 2, 3, 1, 1) , A_ : Tuple=512 , A_ : List[str]=0 , A_ : Any=1 , A_ : Dict=2 , A_ : Optional[Any]=False , A_ : List[Any]=3 , A_ : str=2 , A_ : Tuple=3 , A_ : Dict=None , A_ : Tuple=None , **A_ : Any , ) -> Union[str, Any]:
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(A_ )
__snake_case = list(A_ )
__snake_case = list(A_ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = do_stable_layer_norm
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case = num_codevectors_per_group
__snake_case = num_codevector_groups
__snake_case = contrastive_logits_temperature
__snake_case = feat_quantizer_dropout
__snake_case = num_negatives
__snake_case = codevector_dim
__snake_case = proj_codevector_dim
__snake_case = diversity_loss_weight
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
__snake_case = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(A_ )
__snake_case = list(A_ )
__snake_case = list(A_ )
__snake_case = xvector_output_dim
@property
def lowercase ( self : Optional[Any] ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 )
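# Hedged sanity check (illustration only): with the default strides
# (5, 2, 2, 2, 2, 2, 2) the property above returns 5 * 2**6 = 320, i.e. the
# feature encoder emits one frame per 320 samples -- 20 ms of audio at the
# model's 16 kHz sampling rate.
import functools as _functools
import operator as _operator

_default_strides = (5, 2, 2, 2, 2, 2, 2)
_samples_per_frame = _functools.reduce(_operator.mul, _default_strides, 1)
print(_samples_per_frame, _samples_per_frame / 16_000)  # 320 0.02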
| 712
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase : str = logging.get_logger(__name__)
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = ['''pixel_values''']
def __init__( self : Optional[int] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Dict , ) -> None:
super().__init__(**A_ )
__snake_case = size if size is not None else {'''shortest_edge''': 224}
__snake_case = get_size_dict(A_ , default_to_square=A_ )
__snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__snake_case = get_size_dict(A_ , param_name='''crop_size''' )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : int , ) -> np.ndarray:
__snake_case = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__snake_case = int((256 / 224) * size['''shortest_edge'''] )
__snake_case = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
__snake_case = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
A_ , size=(size_dict['''height'''], size_dict['''width''']) , resample=A_ , data_format=A_ , **A_ )
def lowercase ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[Any] , ) -> np.ndarray:
__snake_case = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ )
def lowercase ( self : List[str] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[Any] , ) -> np.ndarray:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def lowercase ( self : Dict , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def lowercase ( self : Union[str, Any] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Any , ) -> BatchFeature:
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(A_ , default_to_square=A_ )
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(A_ , param_name='''crop_size''' )
__snake_case = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(A_ ) for image in images]
if do_resize:
__snake_case = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(A_ , A_ , A_ ) for image in images]
__snake_case = [to_channel_dimension_format(A_ , A_ ) for image in images]
__snake_case = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
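# Hedged sketch (illustration only) of the shortest-edge resize rule used in
# `resize` above: the shortest side is scaled to int(256 / 224 * shortest_edge)
# while the aspect ratio is preserved, and `preprocess` then center-crops to
# the final square size. The helper below is a standalone reimplementation of
# that arithmetic, not the library function itself.
def _shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    scale = shortest_edge / min(height, width)
    return (round(height * scale), round(width * scale))

_resize_target = int((256 / 224) * 224)  # 256 for the default 224 crop
print(_shortest_edge_output_size(480, 640, _resize_target))  # (256, 341)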
| 93
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve |Z|**2 = R**2 + X**2 for whichever of the three arguments is 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
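    # Hedged examples (illustration only; the function name is reconstructed),
    # based on the 3-4-5 right triangle relation |Z|**2 = R**2 + X**2:
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}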
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ : Tuple = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( lowercase__ , unittest.TestCase ):
lowerCamelCase__: Tuple = DanceDiffusionPipeline
lowerCamelCase__: Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCamelCase__: Optional[Any] = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
lowerCamelCase__: str = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCamelCase__: Optional[Any] = False
lowerCamelCase__: List[Any] = False
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase: Optional[int] = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowercase , use_timestep_embedding=__lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
UpperCAmelCase: int = IPNDMScheduler()
UpperCAmelCase: Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def A__ ( self , __snake_case , __snake_case=0 ) -> Optional[int]:
"""simple docstring"""
if str(__lowercase ).startswith("mps" ):
UpperCAmelCase: Dict = torch.manual_seed(__lowercase )
else:
UpperCAmelCase: Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
UpperCAmelCase: Optional[int] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def A__ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase: str = self.get_dummy_components()
UpperCAmelCase: Dict = DanceDiffusionPipeline(**__lowercase )
UpperCAmelCase: Optional[Any] = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
UpperCAmelCase: List[str] = self.get_dummy_inputs(__lowercase )
UpperCAmelCase: List[Any] = pipe(**__lowercase )
UpperCAmelCase: Optional[int] = output.audios
UpperCAmelCase: Optional[int] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCAmelCase: int = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A__ ( self ) -> List[Any]:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def A__ ( self ) -> Dict:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def A__ ( self ) -> Any:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def A__ ( self ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: Dict = torch_device
UpperCAmelCase: Optional[Any] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
UpperCAmelCase: Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
UpperCAmelCase: Dict = torch.manual_seed(0 )
UpperCAmelCase: Tuple = pipe(generator=__lowercase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
UpperCAmelCase: Union[str, Any] = output.audios
UpperCAmelCase: str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCAmelCase: str = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: str = torch_device
UpperCAmelCase: Dict = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
UpperCAmelCase: int = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
UpperCAmelCase: Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase: Tuple = pipe(generator=__lowercase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
UpperCAmelCase: int = output.audios
UpperCAmelCase: int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCAmelCase: int = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
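# Hedged usage sketch (illustration only): the slow tests above reduce to this
# inference pattern. Assumes a CUDA device and network access to the public
# "harmonai/maestro-150k" checkpoint.
def _dance_diffusion_demo() -> None:
    import torch
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to("cuda")
    generator = torch.manual_seed(0)
    output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
    audio = output.audios  # numpy array of shape (batch, channels, samples)
    print(audio.shape)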
| 715
|
from collections import deque
class __lowerCamelCase :
def __init__( self , __snake_case , __snake_case , __snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase: Tuple = process_name # process name
UpperCAmelCase: Optional[int] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase: Tuple = arrival_time
UpperCAmelCase: List[Any] = burst_time # remaining burst time
UpperCAmelCase: Optional[Any] = 0 # total time of the process wait in ready queue
UpperCAmelCase: List[Any] = 0 # time from arrival time to completion time
class __lowerCamelCase :
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
UpperCAmelCase: str = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase: str = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase: Union[str, Any] = queue
# current time
UpperCAmelCase: Any = current_time
# finished process is in this sequence queue
UpperCAmelCase: deque[Process] = deque()
def A__ ( self ) -> list[str]:
"""simple docstring"""
UpperCAmelCase: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def A__ ( self , __snake_case ) -> list[int]:
"""simple docstring"""
UpperCAmelCase: List[Any] = []
for i in range(len(__snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def A__ ( self , __snake_case ) -> list[int]:
"""simple docstring"""
UpperCAmelCase: List[Any] = []
for i in range(len(__snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def A__ ( self , __snake_case ) -> list[int]:
"""simple docstring"""
UpperCAmelCase: Tuple = []
for i in range(len(__snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def A__ ( self , __snake_case ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def A__ ( self , __snake_case ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def A__ ( self , __snake_case ) -> deque[Process]:
"""simple docstring"""
UpperCAmelCase: deque[Process] = deque() # sequence deque of finished process
while len(__snake_case ) != 0:
UpperCAmelCase: Union[str, Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__snake_case )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase: Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCAmelCase: Union[str, Any] = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase: str = self.current_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def A__ ( self , __snake_case , __snake_case ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
UpperCAmelCase: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__snake_case ) ):
UpperCAmelCase: Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__snake_case )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase: Union[str, Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__snake_case )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase: Optional[Any] = 0
# set the finish time
UpperCAmelCase: Dict = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase: Optional[Any] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def A__ ( self ) -> deque[Process]:
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase , UpperCAmelCase: Dict = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ : Tuple = Process('P1', 0, 5_3)
snake_case_ : List[str] = Process('P2', 0, 1_7)
snake_case_ : Optional[Any] = Process('P3', 0, 6_8)
snake_case_ : int = Process('P4', 0, 2_4)
snake_case_ : Optional[Any] = 3
snake_case_ : Union[str, Any] = [1_7, 2_5]
snake_case_ : Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
snake_case_ : List[Any] = Process('P1', 0, 5_3)
snake_case_ : Optional[Any] = Process('P2', 0, 1_7)
snake_case_ : Optional[int] = Process('P3', 0, 6_8)
snake_case_ : List[Any] = Process('P4', 0, 2_4)
snake_case_ : Tuple = 3
snake_case_ : Union[str, Any] = [1_7, 2_5]
snake_case_ : Optional[int] = deque([Pa, Pa, Pa, Pa])
snake_case_ : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 166
| 0
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
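    # Hedged examples (illustration only; the function name is reconstructed):
    print(decimal_to_binary(0))   # 0b0
    print(decimal_to_binary(40))  # 0b101000
    print(decimal_to_binary(-7))  # -0b111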
| 69
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scalable attention (UMT5 models)",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 683
| 0
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Returns the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Returns the largest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
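# Brute-force cross-check (a sketch added for illustration, not part of the
# original source): score every 13-digit window directly with str_eval.
# Slower than the sliding approach above, but easy to trust.
def solution_brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))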
if __name__ == "__main__":
print(f'''{solution() = }''')
| 413
|
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 413
| 1
|
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulates the BB84 protocol and returns a key of ``key_len`` bits."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
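# Classical sanity check (a sketch added for illustration; pure NumPy, no
# quantum simulation): with 6 * key_len qubit pairs and independent uniform
# bases, about half the positions survive basis sifting, so the sifted key
# averages ~3 * key_len bits - comfortably above the key_len bits we keep.
def expected_sifted_length(key_len: int = 8, trials: int = 1_000) -> float:
    rng = np.random.default_rng(0)
    matches = [
        int((rng.integers(2, size=6 * key_len) == rng.integers(2, size=6 * key_len)).sum())
        for _ in range(trials)
    ]
    return sum(matches) / trials  # ~24.0 for key_len=8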
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 26
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Returns the optimal value for the player to move in a binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
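# Optional extension (a sketch added for illustration, not part of the
# original): the same binary game tree evaluated with alpha-beta pruning,
# which skips subtrees that cannot change the minimax result.
def alphabeta(depth: int, node_index: int, is_max: bool, scores: list[int], height: float,
              alpha: float = -math.inf, beta: float = math.inf) -> float:
    if depth == height:
        return scores[node_index]
    if is_max:
        value = -math.inf
        for child in (node_index * 2, node_index * 2 + 1):
            value = max(value, alphabeta(depth + 1, child, False, scores, height, alpha, beta))
            alpha = max(alpha, value)
            if beta <= alpha:
                break  # prune: the minimizer above will never allow this branch
        return value
    value = math.inf
    for child in (node_index * 2, node_index * 2 + 1):
        value = min(value, alphabeta(depth + 1, child, True, scores, height, alpha, beta))
        beta = min(beta, value)
        if beta <= alpha:
            break  # prune: the maximizer above will never allow this branch
    return value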
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26
| 1
|
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluates the linear hypothesis h(x) = theta_0 + sum(theta_i * x_i)."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
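# Vectorized alternative (a sketch added for illustration, not part of the
# original): the same batch gradient update expressed with NumPy matrix ops
# over a design matrix that carries a leading bias column for theta_0.
def run_gradient_descent_vectorized():
    x = numpy.array([row[0] for row in train_data], dtype=float)
    y = numpy.array([row[1] for row in train_data], dtype=float)
    x = numpy.hstack([numpy.ones((len(x), 1)), x])  # bias column -> theta_0
    theta = numpy.array(parameter_vector, dtype=float)
    while True:
        gradient = x.T @ (x @ theta - y) / len(y)
        new_theta = theta - LEARNING_RATE * gradient
        if numpy.allclose(theta, new_theta, atol=0.000002, rtol=0):
            return new_theta
        theta = new_theta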
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 707
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts a volume between the units in METRIC_CONVERSION via cubic metres."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
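    # Worked example (added for illustration): 5 litres -> US gallons goes
    # through the cubic-metre pivot, 5 * 0.001 * 264.172 ~= 1.3209.
    print(volume_conversion(5, "litre", "gallon"))  # ~1.3209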
| 699
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Returns the optimal value for the player to move in a binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 65
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 707
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 605
| 0
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrapes Amazon search results for the given product into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            continue  # skip incomplete results instead of writing stale values
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 681
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that can map one placeholder token to several learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
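# Hypothetical usage sketch (the model id and placeholder name below are
# assumptions for illustration, not from the original file): expand one
# placeholder into three trainable tokens, then encode a prompt using it.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
#   ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)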
| 681
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Prepares a GHZ state on ``qubits`` qubits and returns the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)
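# Sanity-check sketch (added for illustration, not part of the original):
# a GHZ state only ever collapses to all-zeros or all-ones, so every observed
# bitstring in the counts should be uniform.
def check_ghz_counts(qubits: int = 3) -> None:
    counts = quantum_entanglement(qubits)
    assert all(key in ("0" * qubits, "1" * qubits) for key in counts)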
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}")
| 577
|
def sum_of_divisors(input_num: int) -> int:
    """Returns the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
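# Worked example (added for illustration): a number is perfect when it equals
# the sum of its proper divisors, e.g. 28 = 1 + 2 + 4 + 7 + 14.
def is_perfect(number: int) -> bool:
    return sum_of_divisors(number) == number
# is_perfect(28) -> True, is_perfect(27) -> False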
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577
| 1
|
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Computes the rank of a matrix by Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
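# Cross-check sketch (assumes NumPy is available; not part of the original):
# compare against numpy.linalg.matrix_rank on a copy, since rank_of_matrix
# mutates its argument in place.
def check_rank(matrix: list[list[float]]) -> bool:
    import numpy as np

    return rank_of_matrix([row[:] for row in matrix]) == np.linalg.matrix_rank(np.array(matrix))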
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
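# Usage sketch (hedged: constrained beam search via the `constraints` argument
# of `generate` exists in transformers, but the model/tokenizer names here are
# placeholders chosen for illustration):
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   constraint = DisjunctiveConstraint(tok(["rain", "raining"], add_special_tokens=False).input_ids)
#   out = model.generate(**tok("the weather is", return_tensors="pt"),
#                        constraints=[constraint], num_beams=4)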
| 664
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    """Moves ``state_dict[old]`` to ``state_dict[new]``."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Renames the timm backbone keys to the HF conv-encoder naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Converts an original Conditional DETR checkpoint to the HF format."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 716
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 147
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowerCAmelCase : Optional[int] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d[new_key.replace("weight", "bias")] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
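
# Example invocation (a sketch with hypothetical paths; the script file name below is
# illustrative, and the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS so the
# model type can be inferred from it):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa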
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
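
# Quick illustration of the semantics exercised above (matches the parametrized cases):
#   _distribute_shards(num_shards=10, max_num_jobs=3) -> [range(0, 4), range(4, 7), range(7, 10)]
# i.e. shards are split into contiguous, near-equal chunks, one chunk per job, and
# _split_gen_kwargs applies the same partitioning to the "shards" entry of gen_kwargs.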
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
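
# Minimal usage sketch (an assumption for ad-hoc use: "spiece.model" below is a
# hypothetical local SentencePiece file, not something shipped with this module):
if __name__ == "__main__":
    tokenizer = ReformerTokenizer(vocab_file="spiece.model")
    print(tokenizer.tokenize("Hello world"))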
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.
    Returns its length together with the subsequence itself.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0

            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase_ = """
Human: <<task>>
Assistant: """
lowercase_ = """huggingface-tools/default-prompts"""
lowercase_ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
lowercase__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , _SCREAMING_SNAKE_CASE ) is not None:
return prompt_or_repo_id
lowercase__ = cached_file(
_SCREAMING_SNAKE_CASE , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
return f.read()
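
# Minimal usage sketch (requires network access to the Hugging Face Hub; "my-agent"
# is a hypothetical agent name used only in the download user-agent header):
if __name__ == "__main__":
    prompt_template = download_prompt(None, agent_name="my-agent", mode="run")
    print(prompt_template[:200])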
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks the order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters
    arranged differently (ignoring the case).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines
    the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    DDIMInverseScheduler is the reverse scheduler of DDIMScheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
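
# Minimal usage sketch of the inverse scheduler (shapes are illustrative; a real
# pipeline would feed UNet noise predictions instead of random tensors):
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 4, 8, 8)
    model_output = torch.randn(1, 4, 8, 8)
    out = scheduler.step(model_output, int(scheduler.timesteps[0]), sample)
    print(out.prev_sample.shape)  # torch.Size([1, 4, 8, 8])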
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
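
# Example launch (a sketch; torchrun sets the RANK and WORLD_SIZE environment
# variables this script reads, and the file name below is hypothetical):
#   torchrun --nproc_per_node=2 distributed_shard_test.py --streaming True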
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
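
# Minimal usage sketch (downloads the public "YituTech/conv-bert-base" tokenizer
# files from the Hugging Face Hub on first use):
if __name__ == "__main__":
    tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    print(tokenizer("ConvBERT uses span-based dynamic convolution.")["input_ids"])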
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all the multiples of 3 or 5 below n.
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # unreachable: any multiple of 15 is already a multiple of 3
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f"{solution() = }")
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """
    Bad-character heuristic of the Boyer-Moore string-searching algorithm.
    """

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """finds the index of char in pattern in reverse order"""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        finds the index of the mismatched character in text when compared with
        pattern from the last position
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
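
# For the demo above, pattern "AB" occurs at indices 0 and 3 of "ABAABA", so the
# expected output is: "Pattern found in following positions:" followed by [0, 3].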
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a YOLOS model.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
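
# Minimal usage sketch: build a default YOLOS configuration and inspect a few fields.
if __name__ == "__main__":
    config = YolosConfig()
    print(config.model_type, config.hidden_size, config.num_detection_tokens)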
'''simple docstring'''
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
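
# Example invocation (a sketch with hypothetical paths; requires the `t5x` package
# plus a local T5X checkpoint and a matching MT5 config JSON):
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention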
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class A__:
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : str=99 , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=5_12 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__SCREAMING_SNAKE_CASE , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FalconModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = FalconModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check the causal-LM head: logits must cover the full vocabulary."""
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Verify that cached (past_key_values) decoding matches a full forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append the new tokens to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select a random hidden dimension and compare the last three positions
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for the slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepared inputs into the dict format the shared tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        # Falcon supports both rotary (alibi=False) and alibi position embeddings.
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_cache_conversion(self):
        # Falcon stores its KV cache in an "RW" layout internally; check that the
        # round-trip through the conversion helpers preserves the standard cache.
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)

        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)

        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
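    # Note on the two layouts checked above: the "RW" cache (the naming used by the
    # original Falcon codebase) fuses the batch and head dimensions into one leading
    # axis, giving 3-dim key/value tensors, while the standard Transformers cache
    # keeps them separate as (batch, num_heads, seq_len, head_dim), hence the
    # ndim == 3 / ndim == 4 assertions.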
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Overridden because Falcon can use fewer key/value heads than attention heads.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        # greedy decoding so the exact-match assertion below is deterministic
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)
    @slow
    def test_lm_generation_big_models(self):
        # The real checkpoints are too large for CI, so we use tiny random models
        # that share the architectures and only check that generation runs.
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
    @slow
    def test_lm_generation_use_cache(self):
        # Greedy generation must be token-for-token identical with and without the KV cache.
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
def _a ( UpperCAmelCase__ = 10**9 ) -> int:
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
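# Quick sanity check (illustrative, obtained by stepping the loop by hand, not part
# of the original solution): the recurrence yields perimeters 16, 50, 196, 722, ...
# so solution(100) == 0 + 16 + 50 == 66.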
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the old checkpoint's weights into the new transformers model layout."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
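    # Each key reported as missing by `output_loading_info` is filled in below by
    # walking the dotted key down both the new and the old model (translating names
    # through the mapping above) and copying the old parameter over.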
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__snake_case = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
__snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__snake_case = old_model.bias
logger.info(f"""{attribute} is initialized""" )
__snake_case = True
break
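            # The old attention module stores Q/K/V as one fused `in_proj` parameter
            # of shape (3 * embed_dim, embed_dim); the new model keeps separate
            # projection layers, so the fused weight and bias are sliced into thirds.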
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__snake_case = True
break
if attribute.isdigit():
__snake_case = model[int(snake_case_ )]
__snake_case = old_model[int(snake_case_ )]
else:
__snake_case = getattr(snake_case_ , snake_case_ )
if old_attribute == "":
__snake_case = old_model
else:
if not hasattr(snake_case_ , snake_case_ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
__snake_case = getattr(snake_case_ , snake_case_ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
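# With the lazy module installed above, importing this package is cheap: the
# torch-dependent `modeling_mgp_str` submodule is only imported the first time one
# of its attributes (e.g. MgpstrForSceneTextRecognition) is actually accessed.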
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the model family from the checkpoint name, if possible."""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score a prediction against every reference answer and keep the best value."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
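# For example, metric_max_over_ground_truths(f1_score, "paris", ["Paris", "the city of Paris"])
# computes F1 against each acceptable answer and returns the highest score, so a
# prediction only needs to match one of the references.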
def get_scores(args, preds_path, gold_data_path):
    """Compute exact-match and F1 between predictions and gold answers."""
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute precision@k between retrieved provenance and gold provenance."""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
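# Worked example: with k=2, a hypothesis line "Paris\tLyon" and a reference line
# "Lyon\tMarseille" share one title, so that sample contributes
# len({...} & {...}) / k = 0.5 to the running total before the final 100/total scaling.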
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions and return their titles."""

    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    """Generate answers for a batch of questions with the full RAG pipeline."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
snake_case__ : Optional[int] = get_args()
main(args)
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
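# Test fixtures for the Banker's algorithm below: the claim vector is the total stock
# of each resource type, each row of the allocation table is what one process currently
# holds, and each row of the maximum claim table is the most it may ever request.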
class BankersAlgorithm:
    """Deadlock avoidance: only execute processes while a safe sequence exists."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum, per resource type, what all processes currently hold."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the safety algorithm, executing processes whose needs currently fit."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the executed process from the stack
                    need_list.remove(each_need)
                    # update the available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align and print the claim and allocation tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources:       " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
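# Illustrative usage (not executed here), using the fixtures defined above:
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)
#
# Passing describe=True pretty-prints the tables before the safety run.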
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC with the given model's tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
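# In training_function below the loaders are created once per run, e.g.
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)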
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Evaluate the model, de-duplicating the gathered last batch in distributed runs."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script with per-epoch checkpointing.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
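# Launch note (standard Accelerate workflow, not specific to this file): after running
# `accelerate config`, start the script with
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 2
# so the Accelerator picks up the configured distributed setup.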
if __name__ == "__main__":
main()
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # deliberately tiny sizes so the whole suite stays fast on CPU
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: batch x channels x height/32 x width/32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Overrides some common tests, since Bit does not use input_ids/attention_mask."""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# image used by the integration test below
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup to `initial_learning_rate`, then hand off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
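# A minimal usage sketch (the hyperparameter values below are illustrative, not from the original file):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )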
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config(cls, config):
        """simple docstring"""
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        """simple docstring"""
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]['''weight_decay_rate'''] = tf.constant(
            self.weight_decay_rate, name='''adam_weight_decay_rate''')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        """simple docstring"""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''], use_locking=self._use_locking, )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        """simple docstring"""
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """simple docstring"""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        """simple docstring"""
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """simple docstring"""
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        """simple docstring"""
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """simple docstring"""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self) -> None:
        """simple docstring"""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """simple docstring"""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """simple docstring"""
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """simple docstring"""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f'''Expected {len(self._gradients)} gradients, but got {len(gradients)}''')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """simple docstring"""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
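# A minimal usage sketch (names are illustrative): call the accumulator once per
# micro-batch with that batch's gradients, then read `accumulator.gradients`,
# apply them with an optimizer, and call `accumulator.reset()` before the next cycle.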
| 718
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''')
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''')
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''', check_hash=True)['''model''']
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''decoder''') and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''')
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors='''pt''').pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
if "trocr-base-handwritten" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "First elements of logits not as expected"
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
snake_case_ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 355
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize_and_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''crop_pct'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 30})
        self.assertEqual(image_processor.crop_size, {'''height''': 30, '''width''': 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
| 41
|
def is_sum_subset(arr, required_sum):
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
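# Example (computed by hand, not from the original file):
# is_sum_subset([3, 34, 4, 12, 5, 2], 9) -> True, since 4 + 5 = 9.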
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip("""-"""): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
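# e.g. parse_unknown_args(["--num_proc", "4", "--split", "train"]) -> {"num_proc": "4", "split": "train"}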
def main():
    """simple docstring"""
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""", usage="""datasets-cli <command> [<args>]""", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 716
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351
| 0
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """simple docstring"""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
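# try_key XORs the ciphertext against the cycled three-letter key and bails out
# with None as soon as a decoded byte falls outside the printable character set.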
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """simple docstring"""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 51
|
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'''0o{int(octal)}'''
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
| 238
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head) -> None:
    """simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
# self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
_A = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 294
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 294
| 1
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode('''utf-8'''))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode('''utf-8''')
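# Example: base85_encode("Hello") == b"87cURDZ" and base85_decode(b"87cURDZ") == "Hello".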
if __name__ == "__main__":
import doctest
doctest.testmod()
| 482
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self, config, pixel_values, labels ):
        """simple docstring"""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self, config, pixel_values, labels ):
        """simple docstring"""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking( self ):
        """simple docstring"""
        pass
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self)
| 482
| 1
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
UpperCamelCase = data_utils.TransfoXLTokenizer
UpperCamelCase = data_utils.TransfoXLCorpus
UpperCamelCase = data_utils
UpperCamelCase = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file) -> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
UpperCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677
| 0
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    '''simple docstring'''
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int, ) -> None:
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 334
|
'''simple docstring'''
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    '''simple docstring'''
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = """"""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = """0x""" + hexadecimal
    if negative:
        hexadecimal = """-""" + hexadecimal
    return hexadecimal
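# Example: decimal_to_hexadecimal(255) == "0xff" and decimal_to_hexadecimal(-256) == "-0x100".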
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    '''Configuration class for a data2vec text model.'''
    model_type = 'data2vec-text'
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    '''ONNX export configuration for data2vec text models.'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
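# Illustrative usage of the classes above (a sketch, not part of the original
# module); assumes a transformers install where OnnxConfig takes (config, task):
#     config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)
#     onnx_config = Data2VecTextOnnxConfig(config, task="default")
#     list(onnx_config.inputs)  # ['input_ids', 'attention_mask']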
| 185
| 0
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    '''Leftmost insertion point for item that keeps the collection sorted.'''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    '''Rightmost insertion point for item that keeps the collection sorted.'''
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
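    # Illustrative check (not part of the original script): the three search
    # variants above should agree on any sorted list; e.g. for [0, 5, 7, 10, 15],
    # binary_search, binary_search_std_lib and
    # binary_search_by_recursion(..., 0, len(data) - 1) all return index 3 for 10.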
| 594
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''Tokenizer tests for DeBERTa, covering the slow and fast implementations.'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]
            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 594
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''Configuration class to store the configuration of a ResNet model.'''
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs) -> None:
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    '''ONNX export configuration for ResNet models.'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1E-3
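# Illustrative usage of the config above (a sketch, not part of the original
# module); attribute names follow the __init__ arguments defined there:
#     config = ResNetConfig(embedding_size=32, hidden_sizes=[64, 128], depths=[2, 2], layer_type="basic")
#     config.stage_names  # ['stem', 'stage1', 'stage2']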
| 706
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    '''Load an image file from disk and convert it to RGB.'''
    with open(path, 'rb') as f:
        im = Image.open(f)
        return im.convert('RGB')
@dataclass
class DataTrainingArguments:
    '''Arguments pertaining to what data we are going to input our model for training and eval.'''
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.')
@dataclass
class ModelArguments:
    '''Arguments pertaining to which model/config/image processor we are going to fine-tune from.'''
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    '''Stack individual examples into a batch of pixel values and labels.'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    labels = torch.tensor([example['labels'] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_image_classification', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='image-classification', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files['train'] = os.path.join(data_args.train_dir, '**')
        if data_args.validation_dir is not None:
            data_files['validation'] = os.path.join(data_args.validation_dir, '**')
        dataset = load_dataset(
            'imagefolder', data_files=data_files, cache_dir=model_args.cache_dir, task='image-classification', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset['train'].train_test_split(data_args.train_val_split)
        dataset['train'] = split['train']
        dataset['validation'] = split['test']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset['train'].features['labels'].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='image-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE : Any = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE : List[Any] = Compose(
[
RandomResizedCrop(a_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : Dict = Compose(
[
Resize(a_ ),
CenterCrop(a_ ),
ToTensor(),
normalize,
] )
def train_transforms(a_ ):
SCREAMING_SNAKE_CASE : int = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(a_ ):
SCREAMING_SNAKE_CASE : Any = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            dataset['train'] = (
                dataset['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            dataset['validation'] = (
                dataset['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['train'] if training_args.do_train else None, eval_dataset=dataset['validation'] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 179
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds small ViT configs and inputs for the tests below."""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TFViT."""
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO test image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained checkpoint."""
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 379
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    '''Builds a small image-processor config for the tests below.'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''Tests for the ImageGPT image processor.'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    '''Load two sample images used by the integration test.'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    '''Slow integration test against the pretrained ImageGPT checkpoint.'''
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 199
| 0
|
'''simple docstring'''
def mean_absolute_deviation(nums: list) -> float:
    '''Return the mean absolute deviation of a non-empty list of numbers.'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
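    # Illustrative check (assuming the function name used above): for [1, 2, 3, 4]
    # the average is 2.5 and the absolute deviations are 1.5, 0.5, 0.5, 1.5, so
    # mean_absolute_deviation([1, 2, 3, 4]) == 1.0.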
| 13
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13
| 1
|
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 346
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return every possible combination of substrings from word_bank
    that concatenates to the given target string.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
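
# Quick self-check of the tabulation above (an illustrative addition, not part
# of the original module): for target "aa" the table enumerates both
# decompositions, using "aa" directly and building it from two "a"s.
if __name__ == "__main__":
    assert all_construct("aa", ["a", "aa"]) == [["aa"], ["a", "a"]]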
| 622
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 613
|
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
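
# Illustrative usage sketch (an addition; the numbers are made up): with the
# gas constant R = 0.0821 L*atm/(mol*K) used above, 2 moles in a 1 L vessel at
# 300 K give P = nRT / V = 2 * 0.0821 * 300 / 1 = 49.26, rounded to 49 atm.
if __name__ == "__main__":
    assert moles_to_pressure(volume=1.0, moles=2.0, temperature=300.0) == 49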
| 613
| 1
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
# Probabilistic (Miller-Rabin style) primality check, useful for big numbers.
def is_prime_big(n: int, prec: int = 1000) -> bool:
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
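
# The sibling module binary_exp_mod is not shown here. As a point of reference
# (an illustrative addition, not the imported implementation), bin_exp_mod(a, d, n)
# is assumed to behave like Python's built-in pow(a, d, n):
def _reference_bin_exp_mod(a: int, d: int, n: int) -> int:
    result = 1
    a %= n
    while d > 0:
        if d % 2 == 1:  # odd exponent: fold one factor of a into the result
            result = (result * a) % n
        a = (a * a) % n
        d //= 2
    return result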
| 13
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 228
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
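
# Minimal usage sketch (an illustrative addition; the URL and record are
# invented): identical size/checksum records verify cleanly, while any
# mismatch raises one of the exception types defined above.
#
#     record = {"num_bytes": 3, "checksum": None}
#     verify_checksums({"https://example.com/a.txt": record}, {"https://example.com/a.txt": dict(record)})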
| 601
|
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes to the top of the linked list
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, located by their values
    def swap_nodes(self, node_data_1, node_data_2) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
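
# Design note: swap_nodes exchanges the *data* held by the two nodes, which is
# O(1) once both are located; relinking the nodes themselves would also require
# tracking each predecessor. A small self-check (an illustrative addition):
if __name__ == "__main__":
    check = LinkedList()
    for value in (3, 2, 1):
        check.push(value)
    check.swap_nodes(1, 3)
    assert check.head.data == 3 and check.head.next.next.data == 1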
| 601
| 1
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 387
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at the point x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at the point x using Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients from lowest to highest degree
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
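
# Cross-check sketch (an added illustration): both evaluators must agree up to
# floating-point error, but Horner's rule needs only O(n) multiplications
# instead of recomputing each power x**i from scratch.
if __name__ == "__main__":
    assert abs(horner(poly, x) - evaluate_poly(poly, x)) < 1e-9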
| 387
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 411
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
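
# Cross-check (an illustrative addition): Python 3.8+ exposes the same
# quantity as math.comb, so the closed form above can be validated directly.
if __name__ == "__main__":
    from math import comb

    assert combinations(52, 5) == comb(52, 5)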
| 411
| 1
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
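
# Background sketch (added for illustration): Euclid's formula used above
# generates, for coprime m > n of opposite parity, the primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2) with perimeter 2m(m + n); multiples of that
# perimeter are counted by the inner range() loop.
if __name__ == "__main__":
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert a * a + b * b == c * c
    assert a + b + c == 2 * m * (m + n)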
| 439
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=3_5_0 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=snake_case__ , generator=snake_case__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=snake_case__ , generator=snake_case__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=snake_case__ , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=3_5_0 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=snake_case__ , generator=snake_case__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE = DDPMScheduler()
SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=3_5_0 , )
SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type='np' , ).images
SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
snake_case__ , torch_dtype=torch.floataa , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
snake_case__ , torch_dtype=torch.floataa , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = 'a cat sitting on a park bench'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=5 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
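
# Minimal end-to-end sketch of the pipeline exercised above (an illustrative
# addition; it assumes GPU access and the public checkpoint already used in
# the slow tests):
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained(
#         "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]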
| 439
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically
    avoiding whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
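
# Usage sketch for the `global_attention_mask` handling above (an illustrative
# addition; the checkpoint name is taken from PRETRAINED_VOCAB_FILES_MAP):
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("a long document ...")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tokenizer.pad(enc, padding="max_length", max_length=32)
#     # the global attention mask is padded with -1, i.e. "local attention"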
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is to the target by counting
    each character in the right position.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
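
# Smoke test (an illustrative addition): evaluate scores by per-position
# matches, so two strings differing in one character of three score 2.0.
if __name__ == "__main__":
    assert evaluate("abc", "abd")[1] == 2.0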
| 51
| 1
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
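
# An equivalent iterative check (an added sketch): breadth-first two-coloring
# avoids recursion depth limits on long paths.
from collections import deque


def check_bipartite_bfs(graph: dict) -> bool:
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True


print(check_bipartite_bfs(graph))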
| 75
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 75
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
UpperCamelCase__ : Union[str, Any] = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
key_mapping = {}
for item in rename_keys:
    if item[0] in original_param_names:
        key_mapping[item[0]] = '''efficientnet.''' + item[1]
key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
return key_mapping
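# A minimal sketch (hypothetical block name) of the TF -> HF key pairs the
# mapping above is meant to produce, assuming TF block "1a" maps to HF index 0:
example_pairs = {
    "block1a_expand_conv/kernel:0": "efficientnet.encoder.blocks.0.expansion.expand_conv.weight",
    "block1a_dwconv/depthwise_kernel:0": "efficientnet.encoder.blocks.0.depthwise_conv.depthwise_conv.weight",
    "block1a_project_bn/moving_mean:0": "efficientnet.encoder.blocks.0.projection.project_bn.running_mean",
}
for tf_name, hf_name in example_pairs.items():
    print(f"{tf_name} -> {hf_name}")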
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCamelCase__ : Dict = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCamelCase__ : str = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCamelCase__ : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCamelCase__ : int = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE ) )
else:
UpperCamelCase__ : int = torch.from_numpy(SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE )
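# A self-contained sketch of the layout conversions above: TF stores regular
# conv kernels as (H, W, C_in, C_out), depthwise kernels as (H, W, C, mult)
# and dense kernels as (in, out); PyTorch expects (C_out, C_in, H, W),
# (C, mult, H, W) and (out, in). The shapes below are illustrative.
import numpy as np
import torch

tf_conv = np.zeros((3, 3, 32, 16), dtype=np.float32)
assert torch.from_numpy(tf_conv).permute(3, 2, 0, 1).shape == (16, 32, 3, 3)

tf_depthwise = np.zeros((3, 3, 32, 1), dtype=np.float32)
assert torch.from_numpy(tf_depthwise).permute(2, 3, 0, 1).shape == (32, 1, 3, 3)

tf_dense = np.zeros((1280, 1000), dtype=np.float32)
assert torch.from_numpy(np.transpose(tf_dense)).shape == (1000, 1280)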
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase__ : Tuple = model_classes[model_name](
include_top=SCREAMING_SNAKE_CASE , weights='''imagenet''' , input_tensor=SCREAMING_SNAKE_CASE , input_shape=SCREAMING_SNAKE_CASE , pooling=SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='''softmax''' , )
UpperCamelCase__ : List[Any] = original_model.trainable_variables
UpperCamelCase__ : Union[str, Any] = original_model.non_trainable_variables
UpperCamelCase__ : Any = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCamelCase__ : Any = param.numpy()
UpperCamelCase__ : Tuple = list(tf_params.keys() )
# Load HuggingFace model
UpperCamelCase__ : Tuple = get_efficientnet_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ : str = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
UpperCamelCase__ : Any = rename_keys(SCREAMING_SNAKE_CASE )
replace_params(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
UpperCamelCase__ : Optional[Any] = convert_image_processor(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = hf_model(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
UpperCamelCase__ : List[Any] = False
UpperCamelCase__ : Tuple = CONFIG_MAP[model_name]['''image_size''']
UpperCamelCase__ : List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCamelCase__ : int = image.img_to_array(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = np.expand_dims(SCREAMING_SNAKE_CASE , axis=0 )
UpperCamelCase__ : Optional[int] = original_model.predict(SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
os.mkdir(SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"Pushing converted {model_name} to the hub..." )
UpperCamelCase__ : Optional[Any] = F"efficientnet-{model_name}"
preprocessor.push_to_hub(SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
__UpperCamelCase : Dict = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 106
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
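# A minimal, self-contained sketch of the lazy-import pattern used above:
# attribute access resolves and caches the real symbol, so importing the
# package itself stays cheap. `TinyLazyModule` is a toy stand-in, not the
# real `_LazyModule` implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, symbol_to_module):
        super().__init__(name)
        self._symbol_to_module = symbol_to_module

    def __getattr__(self, symbol):  # only hit when normal lookup fails
        module = importlib.import_module(self._symbol_to_module[symbol])
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache for subsequent lookups
        return value

lazy = TinyLazyModule("demo", {"OrderedDict": "collections"})
print(lazy.OrderedDict)  # first access triggers the real import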
| 106
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowercase_ : Optional[int] = logging.get_logger(__name__)
# General docstring
lowercase_ : Dict = '''MobileNetV1Config'''
# Base docstring
lowercase_ : Dict = '''google/mobilenet_v1_1.0_224'''
lowercase_ : int = [1, 1024, 7, 7]
# Image classification docstring
lowercase_ : List[Any] = '''google/mobilenet_v1_1.0_224'''
lowercase_ : Optional[int] = '''tabby, tabby cat'''
lowercase_ : Any = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
_snake_case : List[str] = {}
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = model.mobilenet_va
else:
_snake_case : Tuple = model
_snake_case : Union[str, Any] = 'MobilenetV1/Conv2d_0/'
_snake_case : Dict = backbone.conv_stem.convolution.weight
_snake_case : str = backbone.conv_stem.normalization.bias
_snake_case : Union[str, Any] = backbone.conv_stem.normalization.weight
_snake_case : Dict = backbone.conv_stem.normalization.running_mean
_snake_case : str = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_snake_case : Any = i + 1
_snake_case : str = i * 2
_snake_case : Tuple = backbone.layer[pt_index]
_snake_case : Tuple = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
_snake_case : Any = pointer.convolution.weight
_snake_case : Any = pointer.normalization.bias
_snake_case : Any = pointer.normalization.weight
_snake_case : int = pointer.normalization.running_mean
_snake_case : Optional[Any] = pointer.normalization.running_var
_snake_case : str = backbone.layer[pt_index + 1]
_snake_case : Tuple = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
_snake_case : List[Any] = pointer.convolution.weight
_snake_case : Dict = pointer.normalization.bias
_snake_case : Optional[Any] = pointer.normalization.weight
_snake_case : Optional[int] = pointer.normalization.running_mean
_snake_case : Dict = pointer.normalization.running_var
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : int = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
_snake_case : List[Any] = model.classifier.weight
_snake_case : str = model.classifier.bias
return tf_to_pt_map
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
_snake_case : List[str] = tf.train.list_variables(__lowerCAmelCase )
_snake_case : str = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
_snake_case : Optional[int] = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Union[str, Any] = array
# Build TF to PyTorch weights loading map
_snake_case : Union[str, Any] = _build_tf_to_pytorch_map(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
_snake_case : List[str] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
_snake_case : Tuple = np.transpose(__lowerCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
_snake_case : List[Any] = array.squeeze().transpose()
else:
_snake_case : Optional[Any] = np.transpose(__lowerCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
_snake_case : int = torch.from_numpy(__lowerCAmelCase )
tf_weights.pop(__lowerCAmelCase , __lowerCAmelCase )
tf_weights.pop(name + '/RMSProp' , __lowerCAmelCase )
tf_weights.pop(name + '/RMSProp_1' , __lowerCAmelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , __lowerCAmelCase )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case : Any = features.shape[-2:]
_snake_case , _snake_case : Optional[int] = conv_layer.stride
_snake_case , _snake_case : int = conv_layer.kernel_size
if in_height % stride_height == 0:
_snake_case : List[str] = max(kernel_height - stride_height , 0 )
else:
_snake_case : Optional[Any] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_snake_case : Tuple = max(kernel_width - stride_width , 0 )
else:
_snake_case : List[Any] = max(kernel_width - (in_width % stride_width) , 0 )
_snake_case : List[str] = pad_along_width // 2
_snake_case : List[str] = pad_along_width - pad_left
_snake_case : Dict = pad_along_height // 2
_snake_case : Any = pad_along_height - pad_top
_snake_case : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowerCAmelCase , __lowerCAmelCase , 'constant' , 0.0 )
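# A worked numeric check of the TF "SAME" padding rule implemented above,
# assuming a 224x224 input with a 3x3 kernel and stride 2 (MobileNet's stem):
in_size, kernel, stride = 224, 3, 2
pad_along = max(kernel - stride, 0) if in_size % stride == 0 else max(kernel - (in_size % stride), 0)
pad_begin, pad_end = pad_along // 2, pad_along - pad_along // 2
print(pad_begin, pad_end)  # 0 1 -> TF pads asymmetrically (extra pixel on the right/bottom)
print((in_size + pad_along - kernel) // stride + 1)  # 112 = ceil(224 / 2), matching TF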
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : MobileNetVaConfig , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[bool] = True , lowerCamelCase_ : Optional[bool or str] = True , ):
'''simple docstring'''
super().__init__()
_snake_case : int = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
_snake_case : int = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_snake_case : List[Any] = nn.Convad(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=lowerCamelCase_ , groups=lowerCamelCase_ , bias=lowerCamelCase_ , padding_mode='zeros' , )
if use_normalization:
_snake_case : Union[str, Any] = nn.BatchNormad(
num_features=lowerCamelCase_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCamelCase_ , track_running_stats=lowerCamelCase_ , )
else:
_snake_case : Any = None
if use_activation:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCamelCase_ ):
_snake_case : Any = ACTaFN[config.hidden_act]
else:
_snake_case : Any = config.hidden_act
else:
_snake_case : int = None
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : torch.Tensor ):
'''simple docstring'''
if self.config.tf_padding:
_snake_case : Union[str, Any] = apply_tf_padding(lowerCamelCase_ , self.convolution )
_snake_case : Optional[int] = self.convolution(lowerCamelCase_ )
if self.normalization is not None:
_snake_case : Any = self.normalization(lowerCamelCase_ )
if self.activation is not None:
_snake_case : Dict = self.activation(lowerCamelCase_ )
return features
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : List[Any] = MobileNetVaConfig
_UpperCamelCase : Union[str, Any] = load_tf_weights_in_mobilenet_va
_UpperCamelCase : str = "mobilenet_v1"
_UpperCamelCase : Union[str, Any] = "pixel_values"
_UpperCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Union[nn.Linear, nn.Convad] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCamelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowercase_ : Dict = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowercase_ : str = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : MobileNetVaConfig , lowerCamelCase_ : bool = True ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : List[str] = config
_snake_case : int = 32
_snake_case : Any = max(int(depth * config.depth_multiplier ) , config.min_depth )
_snake_case : Union[str, Any] = MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=config.num_channels , out_channels=lowerCamelCase_ , kernel_size=3 , stride=2 , )
_snake_case : List[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_snake_case : Any = nn.ModuleList()
for i in range(13 ):
_snake_case : Dict = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_snake_case : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=3 , stride=strides[i] , groups=lowerCamelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , kernel_size=1 , ) )
_snake_case : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[bool] = None , ):
'''simple docstring'''
_snake_case : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_snake_case : Union[str, Any] = self.conv_stem(lowerCamelCase_ )
_snake_case : List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_snake_case : Optional[int] = layer_module(lowerCamelCase_ )
if output_hidden_states:
_snake_case : List[str] = all_hidden_states + (hidden_states,)
_snake_case : Union[str, Any] = hidden_states
if self.pooler is not None:
_snake_case : Any = torch.flatten(self.pooler(lowerCamelCase_ ) , start_dim=1 )
else:
_snake_case : Dict = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=lowerCamelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a_ , )
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : MobileNetVaConfig ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
_snake_case : int = config.num_labels
_snake_case : List[Any] = MobileNetVaModel(lowerCamelCase_ )
_snake_case : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_snake_case : List[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCamelCase_ )
_snake_case : Optional[Any] = nn.Linear(lowerCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[bool] = None , ):
'''simple docstring'''
_snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : Union[str, Any] = self.mobilenet_va(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
_snake_case : int = self.classifier(self.dropout(lowerCamelCase_ ) )
_snake_case : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case : Any = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case : List[Any] = 'single_label_classification'
else:
_snake_case : List[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case : str = MSELoss()
if self.num_labels == 1:
_snake_case : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case : Tuple = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case : Optional[Any] = CrossEntropyLoss()
_snake_case : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case : Any = BCEWithLogitsLoss()
_snake_case : Dict = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
_snake_case : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states , )
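# A minimal sketch of the `problem_type` dispatch above, with hypothetical
# shapes: regression -> MSE on squeezed logits, single-label -> cross-entropy
# over flattened logits, multi-label -> BCE with logits.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch, num_labels = 4, 3
logits = torch.randn(batch, num_labels)
regression = MSELoss()(torch.randn(batch, 1).squeeze(), torch.randn(batch))
single_label = CrossEntropyLoss()(logits.view(-1, num_labels), torch.randint(0, num_labels, (batch,)).view(-1))
multi_label = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (batch, num_labels)).float())
print(regression.item(), single_label.item(), multi_label.item())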
| 304
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : int=13 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : str=4 , lowerCamelCase_ : Optional[Any]=[10, 20, 30, 40] , lowerCamelCase_ : Optional[Any]=[2, 2, 3, 2] , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : int=True , lowerCamelCase_ : List[str]=37 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[Any]=0.02 , lowerCamelCase_ : Optional[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : Optional[int]=None , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Optional[int] = batch_size
_snake_case : List[str] = image_size
_snake_case : Union[str, Any] = num_channels
_snake_case : Union[str, Any] = num_stages
_snake_case : int = hidden_sizes
_snake_case : Tuple = depths
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : List[Any] = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : List[str] = num_labels
_snake_case : List[str] = initializer_range
_snake_case : Union[str, Any] = out_features
_snake_case : Dict = out_indices
_snake_case : List[str] = scope
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Dict = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = ConvNextVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : Dict = model(lowerCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : List[str] = model(lowerCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : List[Any] = None
_snake_case : str = ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_snake_case : List[Any] = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : str = config_and_inputs
_snake_case : Dict = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[int] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : int = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Any = ConvNextVaModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : str = True
if model_class.__name__ in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]:
continue
_snake_case : Dict = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
_snake_case : Union[str, Any] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
_snake_case : int = model(**lowerCamelCase_ ).loss
loss.backward()
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[str] = False
_snake_case : str = True
if (
model_class.__name__
in [*get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : Union[str, Any] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
_snake_case : Union[str, Any] = model(**lowerCamelCase_ ).loss
loss.backward()
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(lowerCamelCase_ )
_snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Union[str, Any] = [*signature.parameters.keys()]
_snake_case : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
_snake_case : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
_snake_case : Dict = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
_snake_case : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : int = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = ConvNextVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def A__( ):
_snake_case : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(lowerCamelCase_ )
_snake_case : List[Any] = self.default_image_processor
_snake_case : Optional[int] = prepare_img()
_snake_case : List[Any] = preprocessor(images=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
_snake_case : Optional[int] = model(**lowerCamelCase_ )
# verify the logits
_snake_case : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
_snake_case : Optional[int] = torch.tensor([0.9996, 0.1966, -0.4386] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
| 304
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =FunnelConfig.from_json_file(a__ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase =FunnelBaseModel(a__ ) if base_model else FunnelModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(a__ , a__ , a__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
lowercase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( lowerCamelCase : Dict , lowerCamelCase : bool = True , lowerCamelCase : float = math.inf , lowerCamelCase : float = -math.inf , lowerCamelCase : float = math.inf , lowerCamelCase : float = -math.inf , lowerCamelCase : bool = False , lowerCamelCase : float = 100 , lowerCamelCase : float = 0.01 , lowerCamelCase : float = 1 , ):
lowerCAmelCase = False
lowerCAmelCase = search_prob
lowerCAmelCase = start_temperate
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = None
while not search_end:
lowerCAmelCase = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCAmelCase = current_state
scores.append(lowerCamelCase )
iterations += 1
lowerCAmelCase = None
lowerCAmelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCAmelCase = random.randint(0 , len(lowerCamelCase ) - 1 ) # picking a random neighbor
lowerCAmelCase = neighbors.pop(lowerCamelCase )
lowerCAmelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCAmelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCAmelCase = picked_neighbor
else:
lowerCAmelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCAmelCase = picked_neighbor
lowerCAmelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCAmelCase = True
else:
lowerCAmelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCamelCase ) , lowerCamelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
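# A worked numeric sketch of the acceptance rule above: a worsening move
# (change < 0 when maximizing) is still taken with probability e^(change / T),
# so high temperatures accept almost anything and low ones almost nothing.
for temperature in (100.0, 10.0, 1.0):
    probability = math.e ** (-5.0 / temperature)  # a move that worsens the score by 5
    print(f"T={temperature:6.1f}: accept with p={probability:.3f}")
# T= 100.0: accept with p=0.951
# T=  10.0: accept with p=0.607
# T=   1.0: accept with p=0.007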
if __name__ == "__main__":
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__snake_case =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__snake_case =simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__snake_case =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__snake_case =simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def a_ ( lowerCamelCase : Any , lowerCamelCase : List[Any] ):
return (3 * x**2) - (6 * y)
__snake_case =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__snake_case =simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
__snake_case =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__snake_case =simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 133
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] ):
# Initialise PyTorch model
lowerCAmelCase = RemBertConfig.from_json_file(lowerCamelCase )
print('Building PyTorch model from configuration: {}'.format(str(lowerCamelCase ) ) )
lowerCAmelCase = RemBertModel(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCamelCase ) )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case =parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 133
| 1
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _a ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any]=() , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict="no" , _SCREAMING_SNAKE_CASE : List[Any]="29500" ):
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
_SCREAMING_SNAKE_CASE = True
elif "IPython" in sys.modules:
_SCREAMING_SNAKE_CASE = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
_SCREAMING_SNAKE_CASE = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , lowercase__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = PrepareForLaunch(lowercase__ , distributed_type="TPU" )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowercase__ , args=lowercase__ , nprocs=lowercase__ , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*lowercase__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowercase__ , master_addr="127.0.0.1" , master_port=lowercase__ , mixed_precision=lowercase__ ):
_SCREAMING_SNAKE_CASE = PrepareForLaunch(lowercase__ , distributed_type="MULTI_GPU" )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowercase__ , args=lowercase__ , nprocs=lowercase__ , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_SCREAMING_SNAKE_CASE = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*lowercase__ )
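# A hedged usage sketch, assuming this launcher is exposed publicly as
# `accelerate.notebook_launcher`; the training function below is hypothetical:
#
#     from accelerate import Accelerator, notebook_launcher
#
#     def training_loop(mixed_precision="fp16"):
#         accelerator = Accelerator(mixed_precision=mixed_precision)  # created *inside* the function
#         ...  # build model/dataloaders, accelerator.prepare(...), train
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)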
def _a ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple=() , _SCREAMING_SNAKE_CASE : List[str]=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowercase__ , master_addr="127.0.0.1" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
_SCREAMING_SNAKE_CASE = PrepareForLaunch(lowercase__ , debug=lowercase__ )
start_processes(lowercase__ , args=lowercase__ , nprocs=lowercase__ , start_method="fork" )
| 702
|
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_snake_case : Optional[int] = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
_snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def _a ( ):
_SCREAMING_SNAKE_CASE = "https://pypi.org/pypi/diffusers/json"
_SCREAMING_SNAKE_CASE = json.loads(request.urlopen(_SCREAMING_SNAKE_CASE ).read() )["releases"].keys()
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : version.Version(_SCREAMING_SNAKE_CASE ) )
def _a ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
init_hf_modules()
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import .xxx`
_SCREAMING_SNAKE_CASE = re.findall(r"^\s*import\s+\.(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(_SCREAMING_SNAKE_CASE ) )
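# A self-contained check of the two regexes above on a small source string;
# both `import .xxx` and `from .xxx import yyy` forms are picked up:
_sample = "import .unet\nfrom .schedulers import DDIMScheduler\nimport torch\n"
_found = re.findall(r"^\s*import\s+\.(\S+)\s*$", _sample, flags=re.MULTILINE)
_found += re.findall(r"^\s*from\s+\.(\S+)\s+import", _sample, flags=re.MULTILINE)
print(sorted(set(_found)))  # ['schedulers', 'unet']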
def _a ( _SCREAMING_SNAKE_CASE : List[str] ):
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = [module_file]
_SCREAMING_SNAKE_CASE = []
# Let's recurse through all relative imports
while not no_change:
_SCREAMING_SNAKE_CASE = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ).parent
_SCREAMING_SNAKE_CASE = [str(module_path / m ) for m in new_imports]
_SCREAMING_SNAKE_CASE = [f for f in new_import_files if f not in all_relative_imports]
_SCREAMING_SNAKE_CASE = [F'{f}.py' for f in new_import_files]
_SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(_SCREAMING_SNAKE_CASE )
return all_relative_imports
def _a ( _SCREAMING_SNAKE_CASE : str ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import xxx`
_SCREAMING_SNAKE_CASE = re.findall(r"^\s*import\s+(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r"^\s*from\s+(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
_SCREAMING_SNAKE_CASE = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
_SCREAMING_SNAKE_CASE = list(set(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = []
for imp in imports:
try:
importlib.import_module(_SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F'{", ".join(_SCREAMING_SNAKE_CASE )}. Run `pip install {" ".join(_SCREAMING_SNAKE_CASE )}`' )
return get_relative_imports(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
_SCREAMING_SNAKE_CASE = module_path.replace(os.path.sep , "." )
_SCREAMING_SNAKE_CASE = importlib.import_module(_SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(_SCREAMING_SNAKE_CASE )
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : List[Any] ):
from ..pipelines import DiffusionPipeline
_SCREAMING_SNAKE_CASE = dict(inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) )
_SCREAMING_SNAKE_CASE = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _SCREAMING_SNAKE_CASE )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
_SCREAMING_SNAKE_CASE = cls
return pipeline_class
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
_SCREAMING_SNAKE_CASE = str(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = module_file_or_url
_SCREAMING_SNAKE_CASE = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
_SCREAMING_SNAKE_CASE = get_diffusers_versions()
# cut ".dev0"
_SCREAMING_SNAKE_CASE = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
_SCREAMING_SNAKE_CASE = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
_SCREAMING_SNAKE_CASE = F'v{revision}'
elif revision == "main":
_SCREAMING_SNAKE_CASE = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
_SCREAMING_SNAKE_CASE = COMMUNITY_PIPELINES_URL.format(revision=_SCREAMING_SNAKE_CASE , pipeline=_SCREAMING_SNAKE_CASE )
try:
_SCREAMING_SNAKE_CASE = cached_download(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = "git"
_SCREAMING_SNAKE_CASE = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
_SCREAMING_SNAKE_CASE = hf_hub_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
_SCREAMING_SNAKE_CASE = check_imports(_SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
_SCREAMING_SNAKE_CASE = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
_SCREAMING_SNAKE_CASE = F'{module_needed}.py'
shutil.copy(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = use_auth_token
elif use_auth_token is True:
_SCREAMING_SNAKE_CASE = HfFolder.get_token()
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = model_info(_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_SCREAMING_SNAKE_CASE = submodule_path / commit_hash
_SCREAMING_SNAKE_CASE = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_SCREAMING_SNAKE_CASE , F'{module_needed}.py' , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , **_SCREAMING_SNAKE_CASE : Tuple , ):
_SCREAMING_SNAKE_CASE = get_cached_module_file(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return get_class_in_module(_SCREAMING_SNAKE_CASE , final_module.replace(".py" , "" ) )
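A minimal, de-obfuscated sketch of the import step these helpers end with: import a cached module by dotted path and fetch a class from it, scanning for a unique subclass when no name is given. The helper name `load_pipeline_class` is hypothetical, not part of the library.

import importlib
import inspect


def load_pipeline_class(module_name, class_name=None, base_class=None):
    # Import the already-cached dynamic module by its dotted name,
    # e.g. "diffusers_modules.local.my_pipeline".
    module = importlib.import_module(module_name)
    if class_name is not None:
        return getattr(module, class_name)
    # Otherwise look for exactly one class deriving from base_class,
    # mirroring the find-pipeline-class scan above.
    candidates = [
        cls
        for _, cls in inspect.getmembers(module, inspect.isclass)
        if base_class is not None and issubclass(cls, base_class) and cls is not base_class
    ]
    if len(candidates) != 1:
        raise ValueError(f"Expected exactly one pipeline class, found {len(candidates)}.")
    return candidates[0]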
| 493
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase_ :
@staticmethod
def _SCREAMING_SNAKE_CASE ( *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Optional[int]:
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
__magic_name__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ) -> List[str]:
UpperCAmelCase_ : Tuple = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase_ : Optional[Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Dict = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase_ : Tuple = len(lowerCAmelCase_ )
self.assertGreater(lowerCAmelCase_ , 0 )
self.assertEqual(
lowerCAmelCase_ , [
{
"score": ANY(lowerCAmelCase_ ),
"label": ANY(lowerCAmelCase_ ),
"box": {"xmin": ANY(lowerCAmelCase_ ), "ymin": ANY(lowerCAmelCase_ ), "xmax": ANY(lowerCAmelCase_ ), "ymax": ANY(lowerCAmelCase_ )},
}
for i in range(lowerCAmelCase_ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase_ : Dict = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
UpperCAmelCase_ : Dict = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Any = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
UpperCAmelCase_ : List[str] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
pass
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Optional[int] = 0.2
UpperCAmelCase_ : Any = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase_ : Dict = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
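A minimal usage sketch of the pipeline these tests exercise, assuming `transformers` with a vision backend is installed (image URL, candidate labels, and threshold are taken from the tests above):

from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for prediction in predictions:
    # Each prediction carries a score, a label, and a pixel bounding box.
    print(prediction["score"], prediction["label"], prediction["box"])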
| 95
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''informer'''
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : str = "student_t" , lowerCAmelCase_ : str = "nll" , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : List[int] = None , lowerCAmelCase_ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : int = 64 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 100 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : str = "prob" , lowerCAmelCase_ : int = 5 , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Tuple , ) -> Tuple:
# time series specific configuration
UpperCAmelCase_ : str = prediction_length
UpperCAmelCase_ : Tuple = context_length or prediction_length
UpperCAmelCase_ : Any = distribution_output
UpperCAmelCase_ : Union[str, Any] = loss
UpperCAmelCase_ : Any = input_size
UpperCAmelCase_ : int = num_time_features
UpperCAmelCase_ : Union[str, Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : List[Any] = scaling
UpperCAmelCase_ : List[str] = num_dynamic_real_features
UpperCAmelCase_ : int = num_static_real_features
UpperCAmelCase_ : Any = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase_ : Optional[Any] = cardinality
else:
UpperCAmelCase_ : Optional[int] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase_ : Any = embedding_dimension
else:
UpperCAmelCase_ : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ : Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase_ : List[Any] = d_model
UpperCAmelCase_ : List[str] = encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = decoder_layers
UpperCAmelCase_ : List[str] = dropout
UpperCAmelCase_ : Optional[Any] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = activation_function
UpperCAmelCase_ : int = init_std
UpperCAmelCase_ : Optional[Any] = use_cache
# Informer
UpperCAmelCase_ : int = attention_type
UpperCAmelCase_ : List[str] = sampling_factor
UpperCAmelCase_ : Union[str, Any] = distil
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
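A short sketch of the arithmetic behind `feature_size` and the `_number_of_features` property above, using the configuration defaults (one input variable, the seven default lags, no extra time, real, or categorical features); the numbers are illustrative:

input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
num_time_features = 0
num_dynamic_real_features = 0
num_static_real_features = 0
embedding_dimension_sum = 0  # no static categorical features

number_of_features = (
    embedding_dimension_sum
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
feature_size = input_size * len(lags_sequence) + number_of_features
print(feature_size)  # 1 * 7 + 2 = 9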
| 95
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase :str = 16
__lowerCAmelCase :int = 32
def A ( UpperCAmelCase , UpperCAmelCase = 16 ):
_snake_case : Optional[int] = AutoTokenizer.from_pretrained("bert-base-cased" )
_snake_case : str = load_dataset("glue" , "mrpc" )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : Dict = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : Optional[int] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : Any = 16
elif accelerator.mixed_precision != "no":
_snake_case : Tuple = 8
else:
_snake_case : int = None
return tokenizer.pad(
UpperCAmelCase , padding="longest" , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_snake_case : Tuple = DataLoader(
tokenized_datasets["train"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
_snake_case : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCAmelCase :Dict = mocked_dataloaders # noqa: F811
def A ( UpperCAmelCase , UpperCAmelCase ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , UpperCAmelCase ) == "1":
_snake_case : str = 2
# New Code #
_snake_case : Union[str, Any] = int(args.gradient_accumulation_steps )
_snake_case : Dict = int(args.local_sgd_steps )
# Initialize accelerator
_snake_case : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Union[str, Any] = config["lr"]
_snake_case : Union[str, Any] = int(config["num_epochs"] )
_snake_case : str = int(config["seed"] )
_snake_case : List[str] = int(config["batch_size"] )
_snake_case : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(UpperCAmelCase )
_snake_case , _snake_case : Optional[int] = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : str = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : int = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
_snake_case : Any = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : str = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=UpperCAmelCase , model=UpperCAmelCase , local_sgd_steps=UpperCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCAmelCase ):
_snake_case : Optional[Any] = model(**UpperCAmelCase )
_snake_case : Optional[Any] = output.loss
accelerator.backward(UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : Optional[Any] = model(**UpperCAmelCase )
_snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
_snake_case : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
def A ( ):
_snake_case : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=UpperCAmelCase , default=UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=UpperCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=UpperCAmelCase , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_snake_case : List[str] = parser.parse_args()
_snake_case : str = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
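A self-contained sketch of the LocalSGD training pattern used above, reduced to a toy model so it runs on a single CPU (the toy data and sizes are illustrative, not from the example):

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randint(0, 2, (32,)))
loader = DataLoader(dataset, batch_size=8)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
loss_fn = torch.nn.CrossEntropyLoss()

model.train()
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for inputs, labels in loader:
        loss = loss_fn(model(inputs), labels)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        # Parameters are synchronized across workers every `local_sgd_steps` steps.
        local_sgd.step()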
| 278
|
import numpy as np
__lowerCAmelCase :Dict = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class _a:
def __init__( self ) -> None:
'''simple docstring'''
_snake_case : Optional[Any] = np.array(__snake_case )
def lowercase ( self , __snake_case ) -> np.ndarray:
'''simple docstring'''
_snake_case , _snake_case : List[Any] = np.where(letter == self.SQUARE )
_snake_case : Any = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowercase ( self , __snake_case , __snake_case ) -> str:
'''simple docstring'''
_snake_case : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowercase ( self , __snake_case ) -> str:
'''simple docstring'''
_snake_case : Optional[Any] = message.lower()
_snake_case : List[Any] = message.replace(" " , "" )
_snake_case : Any = message.replace("j" , "i" )
_snake_case : Tuple = np.empty((2, len(__snake_case )) )
for letter_index in range(len(__snake_case ) ):
_snake_case : Dict = self.letter_to_numbers(message[letter_index] )
_snake_case : Tuple = numbers[0]
_snake_case : List[Any] = numbers[1]
_snake_case : Any = first_step.reshape(2 * len(__snake_case ) )
_snake_case : Optional[Any] = ""
for numbers_index in range(len(__snake_case ) ):
_snake_case : Optional[int] = int(second_step[numbers_index * 2] )
_snake_case : List[str] = int(second_step[(numbers_index * 2) + 1] )
_snake_case : Any = self.numbers_to_letter(__snake_case , __snake_case )
_snake_case : Any = encoded_message + letter
return encoded_message
def lowercase ( self , __snake_case ) -> str:
'''simple docstring'''
_snake_case : Union[str, Any] = message.lower()
_snake_case : Optional[int] = message.replace(" " , "" )
_snake_case : Tuple = np.empty(2 * len(__snake_case ) )
for letter_index in range(len(__snake_case ) ):
_snake_case : Union[str, Any] = self.letter_to_numbers(message[letter_index] )
_snake_case : Tuple = numbers[0]
_snake_case : Any = numbers[1]
_snake_case : Optional[Any] = first_step.reshape((2, len(__snake_case )) )
_snake_case : Union[str, Any] = ""
for numbers_index in range(len(__snake_case ) ):
_snake_case : Dict = int(second_step[0, numbers_index] )
_snake_case : Optional[Any] = int(second_step[1, numbers_index] )
_snake_case : Union[str, Any] = self.numbers_to_letter(__snake_case , __snake_case )
_snake_case : str = decoded_message + letter
return decoded_message
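The cipher above rests on one mapping: each letter becomes its 1-based (row, column) pair on the 5x5 Polybius square, with "j" folded into "i". A minimal, de-obfuscated sketch of that round trip:

import numpy as np

SQUARE = np.array(
    [
        ["a", "b", "c", "d", "e"],
        ["f", "g", "h", "i", "k"],
        ["l", "m", "n", "o", "p"],
        ["q", "r", "s", "t", "u"],
        ["v", "w", "x", "y", "z"],
    ]
)


def letter_to_numbers(letter):
    # 1-based (row, column) of the letter on the square.
    rows, cols = np.where(SQUARE == letter)
    return int(rows[0]) + 1, int(cols[0]) + 1


def numbers_to_letter(row, col):
    return SQUARE[row - 1, col - 1]


assert letter_to_numbers("h") == (2, 3)
assert numbers_to_letter(2, 3) == "h"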
| 278
| 1
|
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = 'src/transformers'
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCAmelCase__ = re.compile(R'^\s*try:')
# Catches a line with else:
lowerCAmelCase__ = re.compile(R'^\s*else:')
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
if _re_test_backend.search(lowerCamelCase_) is None:
return None
UpperCamelCase__ : Dict = [b[0] for b in _re_backend.findall(lowerCamelCase_)]
backends.sort()
return "_and_".join(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> str:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase__ : List[str] = f.readlines()
UpperCamelCase__ : List[str] = 0
while line_index < len(lowerCamelCase_) and not lines[line_index].startswith('_import_structure = {'):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase_):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase__ : Optional[int] = []
while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
UpperCamelCase__ : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase_):
UpperCamelCase__ : int = _re_one_line_import_struct.search(lowerCamelCase_).groups()[0]
UpperCamelCase__ : Tuple = re.findall(R'\[([^\]]+)\]' , lowerCamelCase_)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ')])
line_index += 1
continue
UpperCamelCase__ : Optional[Any] = _re_import_struct_key_value.search(lowerCamelCase_)
if single_line_import_search is not None:
UpperCamelCase__ : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(lowerCamelCase_) > 0]
objects.extend(lowerCamelCase_)
elif line.startswith(' ' * 8 + '"'):
objects.append(line[9:-3])
line_index += 1
UpperCamelCase__ : Union[str, Any] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING'):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase__ : Optional[int] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
UpperCamelCase__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
UpperCamelCase__ : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
UpperCamelCase__ : Any = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase_) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase_).groups()[0])
elif _re_import_struct_add_many.search(lowerCamelCase_) is not None:
UpperCamelCase__ : str = _re_import_struct_add_many.search(lowerCamelCase_).groups()[0].split(', ')
UpperCamelCase__ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_) > 0]
objects.extend(lowerCamelCase_)
elif _re_between_brackets.search(lowerCamelCase_) is not None:
UpperCamelCase__ : List[str] = _re_between_brackets.search(lowerCamelCase_).groups()[0].split(', ')
UpperCamelCase__ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_) > 0]
objects.extend(lowerCamelCase_)
elif _re_quote_object.search(lowerCamelCase_) is not None:
objects.append(_re_quote_object.search(lowerCamelCase_).groups()[0])
elif line.startswith(' ' * 8 + '"'):
objects.append(line[9:-3])
elif line.startswith(' ' * 12 + '"'):
objects.append(line[13:-3])
line_index += 1
UpperCamelCase__ : Tuple = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase__ : Optional[Any] = []
while (
line_index < len(lowerCamelCase_)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith('else')
):
UpperCamelCase__ : Optional[Any] = lines[line_index]
UpperCamelCase__ : Optional[Any] = _re_import.search(lowerCamelCase_)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', '))
elif line.startswith(' ' * 8):
objects.append(line[8:-2])
line_index += 1
UpperCamelCase__ : Tuple = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase_):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase__ : Optional[Any] = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
UpperCamelCase__ : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
UpperCamelCase__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
UpperCamelCase__ : Union[str, Any] = lines[line_index]
UpperCamelCase__ : Tuple = _re_import.search(lowerCamelCase_)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', '))
elif line.startswith(' ' * 12):
objects.append(line[12:-2])
line_index += 1
UpperCamelCase__ : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
def find_duplicates(lowerCamelCase_):
return [k for k, v in collections.Counter(lowerCamelCase_).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase__ : Tuple = []
for key in import_dict_objects.keys():
UpperCamelCase__ : Optional[int] = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
UpperCamelCase__ : Any = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
UpperCamelCase__ : Dict = 'base imports' if key == 'none' else f'{key} backend'
errors.append(f'Differences for {name}:')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.')
return errors
def __UpperCAmelCase ( ) -> Dict:
UpperCamelCase__ : Tuple = []
for root, _, files in os.walk(lowerCamelCase_):
if "__init__.py" in files:
UpperCamelCase__ : Any = os.path.join(lowerCamelCase_ , '__init__.py')
UpperCamelCase__ : Union[str, Any] = parse_init(lowerCamelCase_)
if objects is not None:
UpperCamelCase__ : Any = analyze_results(*lowerCamelCase_)
if len(lowerCamelCase_) > 0:
UpperCamelCase__ : int = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowerCamelCase_))
if len(lowerCamelCase_) > 0:
raise ValueError('\n\n'.join(lowerCamelCase_))
def __UpperCAmelCase ( ) -> List[Any]:
UpperCamelCase__ : str = []
for path, directories, files in os.walk(lowerCamelCase_):
for folder in directories:
# Ignore private modules
if folder.startswith('_'):
directories.remove(lowerCamelCase_)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase_) / folder).glob('*.py'))) == 0:
continue
UpperCamelCase__ : Union[str, Any] = str((Path(lowerCamelCase_) / folder).relative_to(lowerCamelCase_))
UpperCamelCase__ : Union[str, Any] = short_path.replace(os.path.sep , '.')
submodules.append(lowerCamelCase_)
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase__ : Optional[int] = str((Path(lowerCamelCase_) / fname).relative_to(lowerCamelCase_))
UpperCamelCase__ : int = short_path.replace('.py' , '').replace(os.path.sep , '.')
if len(submodule.split('.')) == 1:
submodules.append(lowerCamelCase_)
return submodules
lowerCAmelCase__ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def __UpperCAmelCase ( ) -> Dict:
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ : Any = importlib.util.spec_from_file_location(
'transformers' , os.path.join(lowerCamelCase_ , '__init__.py') , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
UpperCamelCase__ : Any = spec.loader.load_module()
UpperCamelCase__ : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowerCamelCase_) > 0:
UpperCamelCase__ : Any = '\n'.join(f'- {module}' for module in module_not_registered)
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
f'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
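A short sketch of the backend-detection step the parser above depends on: given a guard line, extract the `is_xxx_available` names and join them into a canonical key. The two regexes are copied from the sample; the demo inputs are illustrative.

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")


def find_backend(line):
    # Returns None for ordinary lines, and the backend name for guard lines.
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


print(find_backend("    if not is_torch_available():"))  # torch
print(find_backend("    x = 1"))  # None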
| 596
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowercase (unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : int=4 , ):
UpperCamelCase__ : Dict = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : Dict = seq_length
UpperCamelCase__ : Any = is_training
UpperCamelCase__ : int = use_attention_mask
UpperCamelCase__ : Dict = use_token_type_ids
UpperCamelCase__ : Optional[Any] = use_labels
UpperCamelCase__ : Dict = vocab_size
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Any = num_attention_heads
UpperCamelCase__ : Tuple = intermediate_size
UpperCamelCase__ : Optional[int] = hidden_act
UpperCamelCase__ : Optional[Any] = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : List[str] = type_vocab_size
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Dict = num_choices
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase__ : List[str] = None
if self.use_attention_mask:
UpperCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCamelCase__ : Dict = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : int = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = config_and_inputs
UpperCamelCase__ : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __lowercase (__lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : List[Any] = FlaxAlbertModelTester(self)
@slow
def __UpperCamelCase ( self : int):
for model_class_name in self.all_model_classes:
UpperCamelCase__ : Dict = model_class_name.from_pretrained('albert-base-v2')
UpperCamelCase__ : Tuple = model(np.ones((1, 1)))
self.assertIsNotNone(UpperCAmelCase_)
@require_flax
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : int = FlaxAlbertModel.from_pretrained('albert-base-v2')
UpperCamelCase__ : Dict = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
UpperCamelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase__ : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)[0]
UpperCamelCase__ : List[str] = (1, 11, 768)
self.assertEqual(output.shape , UpperCAmelCase_)
UpperCamelCase__ : Dict = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1e-4))
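A compact sketch of the integration check above, assuming Flax support in `transformers` and network access to download the checkpoint (the input ids and expected shape are copied from the test):

import numpy as np
from transformers import FlaxAlbertModel

model = FlaxAlbertModel.from_pretrained("albert-base-v2")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # (1, 11, 768)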
| 596
| 1
|
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 ) ->int:
lowercase_ = right or len(SCREAMING_SNAKE_CASE_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
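A minimal, runnable version of the two-ended recursive linear search implemented above, with parameter names chosen for clarity and doctests that exercise it:

def search(list_data, key, left=0, right=0):
    """Check both ends of the list and recurse inward; return the index of key or -1.

    >>> search([1, 2, 3, 4, 5], 3)
    2
    >>> search([1, 2, 3, 4, 5], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)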
| 603
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _a ( __a ):
"""simple docstring"""
def __init__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Tuple ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : Dict , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : float = 0.0 , lowercase_ : int = 50 , lowercase_ : Optional[bool] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
'''simple docstring'''
if isinstance(self.unet.config.sample_size , lowercase_ ):
lowercase_ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase_ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowercase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ ).prev_sample
lowercase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
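A minimal usage sketch for the pipeline class defined above, assuming `diffusers` is installed; the checkpoint id is illustrative:

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")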
| 603
| 1
|
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__SCREAMING_SNAKE_CASE = mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = max(
mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , mf_knapsack(i - 1 , __UpperCAmelCase , __UpperCAmelCase , j - wt[i - 1] ) + val[i - 1] , )
__SCREAMING_SNAKE_CASE = val
return f[i][j]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
__SCREAMING_SNAKE_CASE = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
__SCREAMING_SNAKE_CASE = dp[i - 1][w_]
return dp[n][w_], dp
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
if not (isinstance(__UpperCAmelCase , (list, tuple) ) and isinstance(__UpperCAmelCase , (list, tuple) )):
raise ValueError(
"""Both the weights and values vectors must be either lists or tuples""" )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
if num_items != len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (
"""The number of weights must be the same as the number of values.\n"""
f"""But got {num_items} weights and {len(__UpperCAmelCase )} values"""
)
raise ValueError(__UpperCAmelCase )
for i in range(__UpperCAmelCase ):
if not isinstance(wt[i] , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (
"""All weights must be integers but got weight of """
f"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = set()
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return optimal_val, example_optional_set
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , i - 1 , __UpperCAmelCase , __UpperCAmelCase )
else:
optimal_set.add(__UpperCAmelCase )
_construct_solution(__UpperCAmelCase , __UpperCAmelCase , i - 1 , j - wt[i - 1] , __UpperCAmelCase )
if __name__ == "__main__":
a = [3, 2, 4, 4]
a = [4, 3, 2, 3]
a = 4
a = 6
a = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
a , a = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
a , a = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 109
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)
a_ : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
a_ : Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE = hf_model.feature_extractor
SCREAMING_SNAKE_CASE = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE = name.split(_UpperCamelCase )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE = 'weight'
else:
SCREAMING_SNAKE_CASE = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE = name.split('.' )
SCREAMING_SNAKE_CASE = int(items[0] )
SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = full_name.split('adaptor.' )[-1]
SCREAMING_SNAKE_CASE = name.split('.' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE = int(items[1] )
else:
SCREAMING_SNAKE_CASE = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
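# A minimal sketch of the weight-tying idea the helper above implements: a
# bias-free linear head that shares its weight with an embedding matrix.
# Toy sizes; `_tied_lm_head_demo` is an illustrative name, not used below.
def _tied_lm_head_demo():
    embedding = nn.Embedding(num_embeddings=10, embedding_dim=4)  # weight: (vocab, hidden)
    lm_head = nn.Linear(4, 10, bias=False)  # weight: (vocab, hidden), same shape
    lm_head.weight = embedding.weight  # shared parameter: hidden states -> vocab logits
    return lm_head(torch.randn(2, 4)).shape  # torch.Size([2, 10])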
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    '''simple docstring'''
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 25_00_04
    config['forced_eos_token_id'] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
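# A sketch of a typical invocation (script name and all paths below are hypothetical
# placeholders, shown only to illustrate how the arguments above fit together):
#   python convert_mbart_wav2vec2_seq2seq.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./mbart50/dict.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./hf_wav2vec2_mbart50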
| 439
| 0
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test's timing excludes download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))

        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , lowercase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
__A = os.listdir(lowercase__ )
__A = [x for x in contents if x.endswith(".ckpt" )][0]
__A = os.path.join(args.output_dir , lowercase__ )
__A = torch.load(lowercase__ , map_location="cpu" )
__A = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__A = {os.path.basename(lowercase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 711
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
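        # Worked instance of the comment above, using these defaults:
        # (10 // 2) ** 2 = 25 patches per frame, and 2 frames * 25 + 1 CLS token = 51 tokens.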
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 205
| 0
|
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
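# Dijkstra's two-stack rules, summarized here for the RULE markers used below:
#   RULE 1: operands are pushed onto the operand stack;
#   RULE 2: operators are pushed onto the operator stack;
#   RULE 3: left parentheses are ignored (they fall through the if/elif chain);
#   RULE 4: on a right parenthesis, pop one operator and two operands, apply, push the result;
#   RULE 5: when the input is exhausted, the result sits on top of the operand stack.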
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20
|
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
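# Sanity check from Project Euler 116's worked example (quoted, not re-derived here):
# a row of length 5 admits 7 red (2-unit), 3 green (3-unit) and 2 blue (4-unit) tilings,
# so solution(5) should return 12.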
| 66
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
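# Illustrative behaviour of the lazy pattern above (hypothetical caller): attribute
# access triggers the real submodule import only on first use, e.g.
#   from transformers.models.mobilenet_v2 import MobileNetV2Config  # imports configuration_mobilenet_v2 only now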
| 388
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 388
| 1
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    """If the DATASETS_VERBOSITY env var is set to a valid choice, return that level."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
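# Illustrative usage of the helpers above (a hypothetical caller, assuming the
# usual `datasets.utils.logging` module path):
#   from datasets.utils import logging as ds_logging
#   ds_logging.set_verbosity_debug()    # equivalent to exporting DATASETS_VERBOSITY=debug
#   ds_logging.disable_progress_bar()   # makes `tqdm` return the EmptyTqdm no-op above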
| 125
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
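# Minimal sketch of using the config above (hypothetical values):
#   config = GPTBigCodeConfig(n_layer=24, n_head=16, multi_query=True)
#   config.hidden_size  # resolves to config.n_embd through attribute_map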
| 580
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 712
|
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 175
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # compare every field except the (blanked) names
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 10_24 / 10_24 / 10_24, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_name)
    onnx.save(model, new_model_path)
    return new_model_path
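# Hypothetical usage sketch (path is a placeholder): fold duplicated initializers
# in an exported model and save the slimmer copy next to it.
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   # optimized_path -> "exported/optimized_model.onnx"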
| 7
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 510
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 173
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""DeiTFeatureExtractor"""]
__A = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 173
| 1
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
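# Illustrative round trip (hypothetical objects; a sketch, not the canonical API usage):
#   inputs = processor(text=["an upbeat jazz track"], padding=True, return_tensors="pt")
#   audio = processor.batch_decode(generated_audio_values, padding_mask=inputs["padding_mask"])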
| 600
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
def test_add_bos_token_slow(self):
    bos_token = "$$$"
    tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
    s = "This is a simple input"
    s2 = ["This is a simple input 1", "This is a simple input 2"]
    bos_token_id = tokenizer.bos_token_id
    out_s = tokenizer(s)
    out_s2 = tokenizer(s2)
    self.assertEqual(out_s.input_ids[0], bos_token_id)
    self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
    decode_s = tokenizer.decode(out_s.input_ids)
    decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
    self.assertEqual(decode_s.split()[0], bos_token)
    self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
@slow
def test_truncation(self):
    tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
    expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
    input_ids = tokenizer.encode(text)
    truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
    decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
    self.assertEqual(decoded_text, expected_truncated_text)
def test_padding_different_model_input_name(self):
    pass
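# Quick illustration of `truncate_before_pattern` outside the test harness
# (a hedged sketch mirroring the slow test above; the regexes mark points at
# which `decode` stops emitting text, useful for trimming code completions):
# tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
# ids = tokenizer.encode("def f():\n    return 1\n\n\n\n# trailing comment")
# print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))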
| 600
| 1
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that records vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
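# Minimal demonstration on the test graphs above (vertex order inside each
# component follows DFS traversal order, so treat the exact lists as illustrative):
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]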
| 720
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
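# Loading the exported decoder back for a smoke test (a hedged sketch: assumes
# `onnxruntime` is installed and the export above ran with --output_path ./sd_onnx;
# paths and the 8x upsampling factor of the SD VAE are assumptions, not outputs
# of this script):
# import numpy as np
# import onnxruntime as ort
# session = ort.InferenceSession("./sd_onnx/vae_decoder/model.onnx")
# latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
# (sample,) = session.run(None, {"latent_sample": latent})
# print(sample.shape)  # roughly (1, 3, 200, 200) for an 8x-upsampling VAE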
| 269
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "crop_pct"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
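# Quick end-to-end check of the processor outside the harness (the checkpoint
# id and the 224x224 default crop are assumptions, not taken from these tests):
# from PIL import Image
# import numpy as np
# from transformers import PoolFormerImageProcessor
# processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# image = Image.fromarray(np.random.randint(0, 256, (40, 50, 3), dtype=np.uint8))
# print(processor(image, return_tensors="pt").pixel_values.shape)  # e.g. torch.Size([1, 3, 224, 224])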
| 496
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
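# Sketch of using safe latent diffusion outside the tests (the sld_* values
# mirror the "strong" configuration above; prompt and device are illustrative,
# and the model download plus a CUDA GPU are assumed, hence the comments):
# import torch
# from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe
# pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
# image = pipe(
#     "portrait photo of an astronaut",
#     generator=torch.manual_seed(0),
#     num_inference_steps=50,
#     sld_guidance_scale=2000,
#     sld_warmup_steps=7,
#     sld_threshold=0.025,
#     sld_momentum_scale=0.5,
#     sld_mom_beta=0.7,
# ).images[0]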
| 483
| 0
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
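# Illustrative call mirroring the tests above (`e`/`d` select how many encoder/
# decoder layers to copy into the student; assumes the helper returns the
# student model first, as the unpacking in the tests suggests):
# import tempfile
# from make_student import create_student_by_copying_alternating_layers
# student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
# print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1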
| 707
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 478
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
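# Minimal sketch of the API under test (needs `joblibspark` and a running
# Spark session, hence the comments; `num_proc` maps to joblib workers, and
# the callable must be picklable, so use a module-level function like add_one):
# from datasets.parallel import parallel_backend
# from datasets.utils.py_utils import map_nested
# with parallel_backend("spark"):
#     print(map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2))
#     # -> {'a': [2, 3], 'b': [4, 5]}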
| 326
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
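# The mappings above drive the usual auto-class dispatch; a minimal sketch
# (checkpoint ids are illustrative and require a download, hence the comments):
# model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
# lm = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")  # resolves to FlaxT5ForConditionalGeneration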
| 326
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository (legacy)."""
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs
    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
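# Illustrative use of the legacy filesystem (a sketch only: `repo_info` would
# normally come from `huggingface_hub.HfApi().dataset_info(...)`, which needs
# network access, hence the comments):
# from huggingface_hub import HfApi
# fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"), token=None)
# print(fs.ls(""))  # top-level files and directories of the dataset repo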
| 713
|
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    """
    Spain's NIF check letter is the 8-digit number modulo 23, looked up in
    LOOKUP_LETTERS (12345678 % 23 == 14, and LOOKUP_LETTERS[14] == "Z").
    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678T")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
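# Minimal sketch wiring the config into the standard transformers model class
# (a quick check that `quant_mode` round-trips; no pretrained weights involved):
# from transformers import IBertConfig, IBertModel
# config = IBertConfig(quant_mode=True)
# model = IBertModel(config)
# assert model.config.quant_mode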
| 55
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def _lowerCAmelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase )
datasets.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__lowerCAmelCase = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__lowerCAmelCase = data_args.train_file.split(""".""" )[-1]
__lowerCAmelCase = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__lowerCAmelCase = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__lowerCAmelCase = load_dataset("""csv""" , data_files=lowercase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__lowerCAmelCase = load_dataset("""json""" , data_files=lowercase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__lowerCAmelCase = raw_datasets["""train"""].features["""label"""].names
__lowerCAmelCase = len(lowercase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
tokenizer = TapexTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
model = BartForSequenceClassification.from_pretrained(
    model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowerCAmelCase = {"""Refused""": 0, """Entailed""": 1}
__lowerCAmelCase = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(examples):
    # Tokenize the texts
    def _convert_table_text_to_pandas(_table_text):
        _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
        _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
        return _table_pd

    sentences = examples["statement"]
    tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
    result = tokenizer(tables, sentences, padding=padding, max_length=max_seq_length, truncation=True)
    result["label"] = examples["label"]
    return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
raw_datasets = raw_datasets.map(
    preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__lowerCAmelCase = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__lowerCAmelCase = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__lowerCAmelCase = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p):
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.argmax(preds, axis=1)
    return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
    data_collator = default_data_collator
elif training_args.fp16:
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
    data_collator = None
# Initialize our Trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
    checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
    checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
    data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model()  # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns("label")
predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
predictions = np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
if trainer.is_world_process_zero():
    with open(output_predict_file, "w") as writer:
        logger.info("***** Predict Results *****")
        writer.write("index\tprediction\n")
        for index, item in enumerate(predictions):
            item = label_list[item]
            writer.write(f"{index}\t{item}\n")
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
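# Illustrative sketch (never invoked by the script): the "#"/newline table
# layout that `_convert_table_text_to_pandas` expects, with made-up values.
def _demo_table_text_to_pandas():
    table_text = "player#points\nalice#10\nbob#7"  # header row, then data rows
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])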
| 689
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowercase_ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 703
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowercase_ = get_logger(__name__)
class ExtractManager:
def __init__( self , lowerCAmelCase = None ) -> str:
'''simple docstring'''
_lowercase =(
os.path.join(lowerCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowercase =Extractor
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_lowercase =os.path.abspath(lowerCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(lowerCAmelCase ) and not (os.path.isdir(lowerCAmelCase ) and os.listdir(lowerCAmelCase ))
)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = False ) -> str:
'''simple docstring'''
_lowercase =self.extractor.infer_extractor_format(lowerCAmelCase )
if not extractor_format:
return input_path
_lowercase =self._get_output_path(lowerCAmelCase )
if self._do_extract(lowerCAmelCase , lowerCAmelCase ):
self.extractor.extract(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
_a = []
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase , 'rb' ) as f:
return f.read(lowerCAmelCase )
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
_lowercase =max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
_lowercase =cls.read_magic_number(lowerCAmelCase , lowerCAmelCase )
except OSError:
return False
return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
@classmethod
def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(lowerCAmelCase )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
def resolved(lowerCAmelCase ) -> str:
return os.path.realpath(os.path.abspath(lowerCAmelCase ) )
def badpath(lowerCAmelCase , lowerCAmelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCAmelCase , lowerCAmelCase ) ).startswith(lowerCAmelCase )
def badlink(lowerCAmelCase , lowerCAmelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowercase =resolved(os.path.join(lowerCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCAmelCase )
_lowercase =resolved(lowerCAmelCase )
for finfo in members:
if badpath(finfo.name , lowerCAmelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(lowerCAmelCase , lowerCAmelCase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
_lowercase =tarfile.open(lowerCAmelCase )
tar_file.extractall(lowerCAmelCase , members=TarExtractor.safemembers(lowerCAmelCase , lowerCAmelCase ) )
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
_a = [b"""\x1F\x8B"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
with gzip.open(lowerCAmelCase , 'rb' ) as gzip_file:
with open(lowerCAmelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class ZipExtractor(MagicNumberBaseExtractor):
_a = [
b"""PK\x03\x04""",
b"""PK\x05\x06""", # empty archive
b"""PK\x07\x08""", # spanned archive
]
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCAmelCase , 'rb' ) as fp:
_lowercase =_EndRecData(lowerCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowercase =fp.read(lowerCAmelCase ) # CD is where we expect it to be
if len(lowerCAmelCase ) == sizeCentralDir:
_lowercase =struct.unpack(lowerCAmelCase , lowerCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with zipfile.ZipFile(lowerCAmelCase , 'r' ) as zip_file:
zip_file.extractall(lowerCAmelCase )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
_a = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
with lzma.open(lowerCAmelCase ) as compressed_file:
with open(lowerCAmelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class RarExtractor(MagicNumberBaseExtractor):
_a = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
_lowercase =rarfile.RarFile(lowerCAmelCase )
rf.extractall(lowerCAmelCase )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
_a = [b"""\x28\xb5\x2F\xFD"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
_lowercase =zstd.ZstdDecompressor()
with open(lowerCAmelCase , 'rb' ) as ifh, open(lowerCAmelCase , 'wb' ) as ofh:
dctx.copy_stream(lowerCAmelCase , lowerCAmelCase )
class Bzip2Extractor(MagicNumberBaseExtractor):
_a = [b"""\x42\x5A\x68"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
with bz2.open(lowerCAmelCase , 'rb' ) as compressed_file:
with open(lowerCAmelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class SevenZipExtractor(MagicNumberBaseExtractor):
_a = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import py7zr
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with py7zr.SevenZipFile(lowerCAmelCase , 'r' ) as archive:
archive.extractall(lowerCAmelCase )
class Lz4Extractor(MagicNumberBaseExtractor):
_a = [b"""\x04\x22\x4D\x18"""]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lz4.frame
with lz4.frame.open(lowerCAmelCase , 'rb' ) as compressed_file:
with open(lowerCAmelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
class Extractor:
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
_a = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A__ ( cls ) -> str:
'''simple docstring'''
return max(
    len(extractor_magic_number )
    for extractor in cls.extractors.values()
    if issubclass(extractor , MagicNumberBaseExtractor )
    for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase , magic_number_length=lowerCAmelCase )
except OSError:
return b""
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase = False ) -> bool:
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=lowerCAmelCase , )
_lowercase =cls.infer_extractor_format(lowerCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A__ ( cls , lowerCAmelCase ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_lowercase =cls._get_magic_number_max_length()
_lowercase =cls._read_magic_number(lowerCAmelCase , lowerCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCAmelCase , magic_number=lowerCAmelCase ):
return extractor_format
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(lowerCAmelCase ) , exist_ok=lowerCAmelCase )
# Prevent parallel extractions
_lowercase =str(Path(lowerCAmelCase ).with_suffix('.lock' ) )
with FileLock(lowerCAmelCase ):
shutil.rmtree(lowerCAmelCase , ignore_errors=lowerCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCAmelCase , lowerCAmelCase ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=lowerCAmelCase , )
_lowercase =extractor if extractor != 'deprecated' else extractor_format
else:
_lowercase =cls.extractors[extractor_format]
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCAmelCase ):
return extractor.extract(lowerCAmelCase , lowerCAmelCase )
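# Illustrative sketch (not part of the module): infer an archive's format from
# its magic number, then extract it. The paths below are placeholders.
def _demo_extract(archive_path="data/example.tar.gz", output_path="extracted/example"):
    extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
    if extractor_format:
        Extractor.extract(archive_path, output_path, extractor_format=extractor_format)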
| 380
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
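# A quick sketch of how this formatter is reached through the public
# `datasets` API; the tiny in-memory dataset is illustrative.
def _demo_torch_format():
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
    ds = ds.with_format("torch")  # __getitem__ now returns torch tensors
    return ds[0]["x"]  # tensor([1, 2])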
| 10
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = AltDiffusionPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowercase_ = 77
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(UpperCamelCase__ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = """A photo of an astronaut"""
lowercase_ = alt_pipe(**UpperCamelCase__ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
lowercase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ = RobertaSeriesModelWithTransformation(UpperCamelCase__ )
lowercase_ = text_encoder
lowercase_ = AltDiffusionPipeline(**UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = alt_pipe(**UpperCamelCase__ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
lowercase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowercase_ = alt_pipe.to(UpperCamelCase__ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.manual_seed(0 )
lowercase_ = alt_pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""numpy""" )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 412
| 0
|
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 701
|
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup, )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
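# Sanity check: 25 == 0b11001 has three set bits; Kernighan's loop clears one
# set bit per iteration (25 -> 24 -> 16 -> 0), so it runs only three times.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3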
| 312
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 5
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change on [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
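# Sanity check: equation(0) = 10 and equation(6) = -26 bracket the positive
# root of 10 - x*x, i.e. sqrt(10) ≈ 3.1623; the bracket halves until < 0.01.
assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01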
| 253
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
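# A minimal sketch of what the lazy pattern buys (illustrative, not part of
# this file): heavy submodules are imported only on first attribute access.
#
#     import transformers                                   # cheap top-level import
#     cls = transformers.MobileBertForSequenceClassification
#     # ^ this first access triggers the real import of modeling_mobilebert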
| 406
|
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list):
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
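# Each value lands in bucket int(i - min_value), so float inputs work as well:
# [0.4, 0.2, 1.5] -> buckets [[0.4, 0.2], [1.5]] -> [0.2, 0.4, 1.5]
assert bucket_sort([0.4, 0.2, 1.5]) == [0.2, 0.4, 1.5]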
| 406
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
_UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline
_UpperCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCAmelCase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase : Dict = frozenset([] )
def lowerCAmelCase ( self : Any):
torch.manual_seed(0)
__lowerCamelCase : str = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_lowerCAmelCase ,)
__lowerCamelCase : Optional[int] = PNDMScheduler(skip_prk_steps=_lowerCAmelCase)
torch.manual_seed(0)
__lowerCamelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0)
__lowerCamelCase : Tuple = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='gelu' ,projection_dim=5_1_2 ,)
__lowerCamelCase : Optional[int] = CLIPTextModel(_lowerCAmelCase)
__lowerCamelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__lowerCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
__lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_lowerCAmelCase)).to(_lowerCAmelCase)
__lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1)[0]
__lowerCamelCase : int = Image.fromarray(np.uint8(_lowerCAmelCase)).convert('RGB').resize((6_4, 6_4))
__lowerCamelCase : Any = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((6_4, 6_4))
if str(_lowerCAmelCase).startswith('mps'):
__lowerCamelCase : Tuple = torch.manual_seed(_lowerCAmelCase)
else:
__lowerCamelCase : Optional[Any] = torch.Generator(device=_lowerCAmelCase).manual_seed(_lowerCAmelCase)
__lowerCamelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase ( self : int):
__lowerCamelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[Any] = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline(**_lowerCAmelCase)
__lowerCamelCase : List[str] = sd_pipe.to(_lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase)
__lowerCamelCase : Any = self.get_dummy_inputs(_lowerCAmelCase)
__lowerCamelCase : List[str] = sd_pipe(**_lowerCAmelCase).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Union[str, Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : Any):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Any):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowerCamelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowerCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
__lowerCamelCase : Dict = 'stabilityai/stable-diffusion-2-inpainting'
__lowerCamelCase : str = StableDiffusionInpaintPipeline.from_pretrained(_lowerCAmelCase ,safety_checker=_lowerCAmelCase)
pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
pipe.enable_attention_slicing()
__lowerCamelCase : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__lowerCamelCase : Optional[int] = torch.manual_seed(0)
__lowerCamelCase : str = pipe(
prompt=_lowerCAmelCase ,image=_lowerCAmelCase ,mask_image=_lowerCAmelCase ,generator=_lowerCAmelCase ,output_type='np' ,)
__lowerCamelCase : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9E-3
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowerCamelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowerCamelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
__lowerCamelCase : Optional[Any] = 'stabilityai/stable-diffusion-2-inpainting'
__lowerCamelCase : str = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase ,torch_dtype=torch.float16 ,safety_checker=_lowerCAmelCase ,)
pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
pipe.enable_attention_slicing()
__lowerCamelCase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__lowerCamelCase : Optional[int] = torch.manual_seed(0)
__lowerCamelCase : Any = pipe(
prompt=_lowerCAmelCase ,image=_lowerCAmelCase ,mask_image=_lowerCAmelCase ,generator=_lowerCAmelCase ,output_type='np' ,)
__lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5E-1
def lowerCAmelCase ( self : Optional[Any]):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowerCamelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowerCamelCase : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
__lowerCamelCase : Union[str, Any] = PNDMScheduler.from_pretrained(_lowerCAmelCase ,subfolder='scheduler')
__lowerCamelCase : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(_lowerCAmelCase)
pipe.set_progress_bar_config(disable=_lowerCAmelCase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__lowerCamelCase : Optional[int] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__lowerCamelCase : str = torch.manual_seed(0)
__lowerCamelCase : Optional[int] = pipe(
prompt=_lowerCAmelCase ,image=_lowerCAmelCase ,mask_image=_lowerCAmelCase ,generator=_lowerCAmelCase ,num_inference_steps=2 ,output_type='np' ,)
__lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 652
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar("""T""")
class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
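# Quick LIFO check for the stack above (module-level asserts mirror the
# convention used elsewhere in this collection).
demo_stack: Stack[int] = Stack()
demo_stack.push(1)
demo_stack.push(2)
assert str(demo_stack) == "2->1"
assert demo_stack.pop() == 2 and demo_stack.peek() == 1 and len(demo_stack) == 1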
| 80
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = 'deberta-v2'

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act='gelu',
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional['TensorType'] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: 'PreTrainedTokenizerBase' = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
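
# Minimal usage sketch (defaults only, no weights are downloaded; the printed
# values follow from the defaults above):
#
#     config = DebertaV2Config()
#     print(config.model_type, config.hidden_size)  # deberta-v2 1536
#     onnx_config = DebertaV2OnnxConfig(config)
#     print(list(onnx_config.inputs))  # token_type_ids appears only if type_vocab_size > 0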
| 153
|
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a stored word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
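
# Expected behaviour (assuming the insertion-ordered dicts of CPython 3.7+):
#
#     autocomplete_using_trie("de")
#     # -> ('depart ', 'detergent ', 'deer ', 'deal ')
#
# Each completion keeps the trailing space emitted for the END marker.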
| 153
| 1
|
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100_000:
        print(f'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
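
# Sanity check: every piecewise-linear approximation of a straight line is
# exact, so for f(x) = 2x on [0, 1] the result equals sqrt(5) ~ 2.2360679...
#
#     print(line_length(lambda x: 2 * x, 0, 1, 10))  # 2.2360679...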
| 358
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 358
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/deit-base-distilled-patch16-224""": (
        """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
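
# Minimal usage sketch (defaults only, nothing is downloaded):
#
#     config = DeiTConfig()
#     print(config.image_size, config.patch_size)  # 224 16
#     print(dict(DeiTOnnxConfig(config).inputs))
#     # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}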
| 630
|
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630
| 1
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root inside
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
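
# Convergence note: the bracket halves every iteration, so reaching a tolerance
# tol from an initial width (b - a) takes about ceil(log2((b - a) / tol)) steps;
# for bisection(-2, 5) with tol = 0.01 that is ceil(log2(700)) = 10 iterations.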
| 385
|
"""simple docstring"""
from __future__ import annotations
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]: # noqa: E741
while r - l > 1:
__lowerCAmelCase: str = (l + r) // 2
if v[m] >= key:
__lowerCAmelCase: Optional[int] = m
else:
__lowerCAmelCase: Optional[int] = m # noqa: E741
return r
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
if len(__SCREAMING_SNAKE_CASE ) == 0:
return 0
__lowerCAmelCase: int = [0] * len(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = 1
__lowerCAmelCase: Union[str, Any] = v[0]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
if v[i] < tail[0]:
__lowerCAmelCase: List[str] = v[i]
elif v[i] > tail[length - 1]:
__lowerCAmelCase: Union[str, Any] = v[i]
length += 1
else:
__lowerCAmelCase: Tuple = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
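
# Worked example: in [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest increasing
# subsequence is 2, 3, 7, 8, 10, 13, so:
#
#     longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])  # 6
#     longest_increasing_subsequence_length([5, 4, 3, 2, 1])                 # 1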
| 346
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
| 685
| 1
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # normalize both embedding matrices row-wise, then take pairwise dot products
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 178
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[str] = (KDPMaDiscreteScheduler,)
__UpperCAmelCase : List[Any] = 10
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**UpperCamelCase__ )
return config
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = self.scheduler_classes[0]
snake_case : str = self.get_scheduler_config(prediction_type="v_prediction" )
snake_case : Dict = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
snake_case : List[Any] = self.dummy_model()
snake_case : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case : Optional[Any] = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : int = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
snake_case : int = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = output.prev_sample
snake_case : Optional[Any] = torch.sum(torch.abs(UpperCamelCase__ ) )
snake_case : Any = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0002 ) < 1e-3
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
if torch_device == "mps":
return
snake_case : Optional[Any] = self.scheduler_classes[0]
snake_case : Optional[int] = self.get_scheduler_config()
snake_case : Dict = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
snake_case : List[Any] = self.dummy_model()
snake_case : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case : List[str] = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : List[Any] = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[int] = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : int = output.prev_sample
snake_case : int = torch.sum(torch.abs(UpperCamelCase__ ) )
snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
if torch_device == "mps":
return
snake_case : Any = self.scheduler_classes[0]
snake_case : Any = self.get_scheduler_config()
snake_case : Union[str, Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
snake_case : Optional[Any] = self.dummy_model()
snake_case : Optional[int] = self.dummy_sample_deter.to(UpperCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case : Optional[Any] = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = output.prev_sample
snake_case : Dict = torch.sum(torch.abs(UpperCamelCase__ ) )
snake_case : str = torch.mean(torch.abs(UpperCamelCase__ ) )
if str(UpperCamelCase__ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1e-2
assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 178
| 1
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.txt',
    'merges_file': 'bpe.codes',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
    },
    'merges_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'vinai/phobert-base': 256,
    'vinai/phobert-large': 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
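
# Minimal usage sketch (assumes the public "vinai/phobert-base" checkpoint and
# word-segmented Vietnamese input, as the PhoBERT authors recommend):
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     ids = tokenizer("Tôi là sinh_viên").input_ids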
| 687
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=99 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]="last" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> Tuple:
'''simple docstring'''
A__ : Tuple =parent
A__ : Any =batch_size
A__ : List[str] =seq_length
A__ : Optional[Any] =is_training
A__ : Dict =use_input_lengths
A__ : int =use_token_type_ids
A__ : Union[str, Any] =use_labels
A__ : Optional[Any] =gelu_activation
A__ : List[Any] =sinusoidal_embeddings
A__ : List[Any] =causal
A__ : str =asm
A__ : Tuple =n_langs
A__ : Dict =vocab_size
A__ : Optional[Any] =n_special
A__ : Tuple =hidden_size
A__ : Dict =num_hidden_layers
A__ : int =num_attention_heads
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Optional[int] =max_position_embeddings
A__ : Optional[int] =type_sequence_label_size
A__ : Tuple =initializer_range
A__ : Any =num_labels
A__ : str =num_choices
A__ : Optional[int] =summary_type
A__ : int =use_proj
A__ : Tuple =scope
A__ : Union[str, Any] =bos_token_id
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict =random_attention_mask([self.batch_size, self.seq_length] )
A__ : Tuple =None
if self.use_input_lengths:
A__ : Tuple =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Optional[Any] =None
if self.use_token_type_ids:
A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any =None
A__ : Tuple =None
A__ : Optional[Any] =None
if self.use_labels:
A__ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Union[str, Any] =ids_tensor([self.batch_size] , 2 ).float()
A__ : str =ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Any =model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , ) -> Union[str, Any]:
'''simple docstring'''
A__ : List[Any] =XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
A__ : Union[str, Any] =XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Optional[int] =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
A__ : List[Any] =outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : str =XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : List[str] =model(lowerCAmelCase_ )
A__ : Tuple =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
A__ : Optional[Any] =model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((A__) , ) : List[Any] =result_with_labels.to_tuple()
A__ : Tuple =model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((A__) , ) : Tuple =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Any:
'''simple docstring'''
A__ : Union[str, Any] =XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str =model(lowerCAmelCase_ )
A__ : List[Any] =model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
A__ : int =self.num_labels
A__ : Tuple =XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Any =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , ) -> List[str]:
'''simple docstring'''
A__ : Optional[Any] =self.num_choices
A__ : Optional[int] =XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : str =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Union[str, Any] =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : Dict =self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Optional[int] =config_and_inputs
A__ : Any ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def lowercase__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]=False ) -> int:
'''simple docstring'''
A__ : Tuple =super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
A__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=1 ) -> Tuple:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : Tuple =min_length + idx + 1
A__ : Tuple =min_length + idx + 1
A__ : Dict =(
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def lowercase__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Union[str, Any]=1 ) -> Any:
'''simple docstring'''
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
A__ : str =min_length + idx + 1
A__ : List[Any] =(batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
A__ : Any =XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCAmelCase_ )
A__ : List[Any] =torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
A__ : Optional[Any] =[
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple =model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
| 687
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 488
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 554
| 0
|
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = 'contact@muhammadumerfarooq.me'
__status__ = 'Alpha'

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open each discovered link
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
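
# Caveat worth noting: the pattern "[a-zA-Z0-9]+@" + domain only matches plain
# local parts on the crawled domain, e.g. "support@github.com" but not
# "first.last@github.com" (dots are excluded) or addresses on other domains.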
| 715
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 567
| 0