| code (string · lengths 82–53.2k) | code_codestyle (int64 · 0–721) | style_context (string · lengths 91–41.9k) | style_context_codestyle (int64 · 0–699) | label (int64 · 0–1) |
|---|---|---|---|---|
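Reading across the preview rows below, each example pairs a `code` snippet with a `style_context` snippet, and the two integer columns appear to identify the identifier-renaming style applied to each snippet. In every row visible here, `label` is 1 exactly when the two style IDs match, which suggests the task is style matching. A minimal sketch of that inferred relationship (the equality rule is read off this preview, not documented in the schema):

```python
def inferred_label(example: dict) -> int:
    # Inference from the visible preview rows: label == 1 iff the code
    # snippet and the style_context snippet carry the same codestyle ID.
    return int(example["code_codestyle"] == example["style_context_codestyle"])
```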
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''Wav2Vec2FeatureExtractor'''
_lowercase : Optional[Any] = '''AutoTokenizer'''
def __init__( self , _lowercase , _lowercase ):
"""simple docstring"""
super().__init__(_lowercase , _lowercase )
_lowerCAmelCase = self.feature_extractor
_lowerCAmelCase = False
@classmethod
def _lowercase ( cls , _lowercase , **_lowercase ):
"""simple docstring"""
try:
return super().from_pretrained(_lowercase , **_lowercase )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , _lowercase , )
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowercase , **_lowercase )
_lowerCAmelCase = WavaVecaCTCTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(feature_extractor=_lowercase , tokenizer=_lowercase )
def __call__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_lowerCAmelCase = kwargs.pop("""raw_speech""" )
else:
_lowerCAmelCase = kwargs.pop("""audio""" , _lowercase )
_lowerCAmelCase = kwargs.pop("""sampling_rate""" , _lowercase )
_lowerCAmelCase = kwargs.pop("""text""" , _lowercase )
if len(_lowercase ) > 0:
_lowerCAmelCase = args[0]
_lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_lowerCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCAmelCase = encodings["""input_ids"""]
return inputs
def _lowercase ( self , *_lowercase , **_lowercase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase , **_lowercase )
_lowerCAmelCase = kwargs.pop("""input_features""" , _lowercase )
_lowerCAmelCase = kwargs.pop("""labels""" , _lowercase )
if len(_lowercase ) > 0:
_lowerCAmelCase = args[0]
_lowerCAmelCase = args[1:]
if input_features is not None:
_lowerCAmelCase = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
if labels is not None:
_lowerCAmelCase = self.tokenizer.pad(_lowercase , **_lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCAmelCase = labels["""input_ids"""]
return input_features
def _lowercase ( self , *_lowercase , **_lowercase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def _lowercase ( self , *_lowercase , **_lowercase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowercase , **_lowercase )
@contextmanager
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_lowerCAmelCase = True
_lowerCAmelCase = self.tokenizer
yield
_lowerCAmelCase = self.feature_extractor
_lowerCAmelCase = False
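Beneath the renamed identifiers, the snippet above has the shape of transformers' `Wav2Vec2Processor`: an audio feature extractor plus a CTC tokenizer behind one `__call__`, with a deprecated `as_target_processor` context manager. A hedged sketch of how such a processor is typically used — the checkpoint name is illustrative only, and this assumes the real transformers class rather than the renamed one above:

```python
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

raw_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
# Text routes through the tokenizer branch of the same __call__:
labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids
```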
| code_codestyle: 5 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
_UpperCamelCase : Optional[Any] = []
def generate(lowercase_ ,lowercase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 ,lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_UpperCamelCase, _UpperCamelCase : List[str] = arr[k - 1], arr[i]
else: # k is odd
_UpperCamelCase, _UpperCamelCase : int = arr[k - 1], arr[0]
generate(k - 1 ,lowercase_ )
generate(len(lowercase_ ) ,lowercase_ )
return res
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(heaps(arr))
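The `style_context` snippet above is Heap's algorithm for generating all permutations of a list, with several assignment targets renamed out from under it (`res`, `user_input`, `heaps`, and `arr` are read but never visibly bound). For comparison, a clean reference version of the same algorithm — a sketch, not part of the dataset row:

```python
def heaps(arr: list) -> list:
    """Return all permutations of arr using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

assert sorted(heaps([1, 2, 3])) == sorted(
    [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
)
```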
| style_context_codestyle: 624 | label: 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __magic_name__ :
def __init__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=13 , lowercase_ : List[Any]=7 , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=False , lowercase_ : List[str]=True , lowercase_ : str=99 , lowercase_ : Any=64 , lowercase_ : List[Any]=5 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=64 , lowercase_ : Tuple="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : Any=0.1 , lowercase_ : Tuple=512 , lowercase_ : Any=16 , lowercase_ : Tuple=2 , lowercase_ : Tuple=0.02 , lowercase_ : Dict=3 , lowercase_ : Tuple=4 , lowercase_ : Optional[int]=None , ):
lowercase_ : str = parent
lowercase_ : List[Any] = batch_size
lowercase_ : str = seq_length
lowercase_ : Optional[Any] = is_training
lowercase_ : List[str] = use_input_mask
lowercase_ : Optional[int] = use_token_type_ids
lowercase_ : Any = use_labels
lowercase_ : Any = vocab_size
lowercase_ : Optional[int] = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Tuple = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : int = type_sequence_label_size
lowercase_ : Any = initializer_range
lowercase_ : int = num_labels
lowercase_ : int = num_choices
lowercase_ : List[str] = scope
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Optional[Any] = None
if self.use_input_mask:
lowercase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
lowercase_ : List[str] = None
lowercase_ : int = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[Any] = MPNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Optional[int] = model(lowercase_ , lowercase_ )
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Any ):
lowercase_ : Union[str, Any] = MPNetForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[str] = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Any = self.num_labels
lowercase_ : Tuple = MPNetForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = self.num_choices
lowercase_ : List[str] = MPNetForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Any = MPNetForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) : int = config_and_inputs
lowercase_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[Any] = MPNetModelTester(self )
lowercase_ : Tuple = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
lowercase_ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase_ : List[Any] = model(lowercase_ )[0]
lowercase_ : Optional[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase_ )
lowercase_ : List[Any] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
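The file above is the standard transformers test suite for MPNet; the `@slow` integration test at the bottom checks a slice of hidden states from `microsoft/mpnet-base`. The same forward pass outside the test harness looks roughly like this (a sketch reusing the tensor and expected shape from the test itself):

```python
import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # expected: torch.Size([1, 11, 768])
```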
| code_codestyle: 30 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
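The snippet above follows transformers' lazy-import pattern: `_import_structure` names the package's exports, the real imports run only under `TYPE_CHECKING`, and at runtime the module is replaced by a `_LazyModule` that resolves attributes on first access. A minimal stand-alone sketch of the same idea without the transformers helpers (module and symbol names here are invented for illustration):

```python
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve exported symbols to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, symbol)

# Hypothetical usage at the bottom of a package __init__:
# sys.modules[__name__] = LazyModule(__name__, {"configuration_swiftformer": ["SwiftFormerConfig"]})
```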
| style_context_codestyle: 30 | label: 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = FunnelTokenizer
__UpperCAmelCase : Any = FunnelTokenizerFast
__UpperCAmelCase : Any = True
__UpperCAmelCase : Optional[Any] = True
def __lowercase ( self : int ):
'''simple docstring'''
super().setUp()
_a : Tuple = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : List[str] ,**_a : Any ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Dict ,**_a : Optional[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Tuple ,_a : Dict ):
'''simple docstring'''
_a : List[Any] = 'UNwant\u00E9d,running'
_a : Dict = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Any = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_a ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[str] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
_a : Any = tokenizer('UNwant\u00E9d,running' )
_a : Tuple = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len )
_a : Tuple = tokenizer('UNwant\u00E9d,running' ,'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len + [1] * sentence_len )
| code_codestyle: 229 |
'''simple docstring'''
import operator as op
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = []
_a : List[str] = lambda __a , __a : int(x / y ) # noqa: E731 integer division operation
_a : List[Any] = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(1_2 ) , 'Stack' , sep=' | ' )
print('-' * (3_0 + len(__a )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__a ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
else:
_a : str = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
_a : str = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
stack.append(
str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
__lowerCAmelCase = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| style_context_codestyle: 229 | label: 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
snake_case_ : Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class A__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , _a : Union[str, Any] , _a : Optional[int] , _a : List[Any]=None , _a : List[Any]=1 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tokenizer
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =len(__SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
_SCREAMING_SNAKE_CASE =n_copies
def __iter__( self : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
_SCREAMING_SNAKE_CASE =self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class A__ ( __UpperCAmelCase ):
def __init__( self : Dict , _a : List[Any] , _a : Union[str, Any] , _a : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =start_length
_SCREAMING_SNAKE_CASE =eof_strings
_SCREAMING_SNAKE_CASE =tokenizer
def __call__( self : Union[str, Any] , _a : List[Any] , _a : str , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_SCREAMING_SNAKE_CASE =[]
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__SCREAMING_SNAKE_CASE )
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =re.split('''(%s)''' % '''|'''.join(_UpperCAmelCase) ,_UpperCAmelCase)
# last string should be ""
return "".join(string_list[:-2])
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__ ,a__=20 ,**a__):
_SCREAMING_SNAKE_CASE =defaultdict(_UpperCAmelCase) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_UpperCAmelCase)):
with torch.no_grad():
_SCREAMING_SNAKE_CASE =batch['''ids'''].shape[-1]
_SCREAMING_SNAKE_CASE =accelerator.unwrap_model(_UpperCAmelCase).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] ,num_return_sequences=_UpperCAmelCase ,**_UpperCAmelCase)
# each task is generated batch_size times
_SCREAMING_SNAKE_CASE =batch['''task_id'''].repeat(_UpperCAmelCase)
_SCREAMING_SNAKE_CASE =accelerator.pad_across_processes(
_UpperCAmelCase ,dim=1 ,pad_index=tokenizer.pad_token_id)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.gather((generated_tokens, generated_tasks))
_SCREAMING_SNAKE_CASE =generated_tokens.cpu().numpy()
_SCREAMING_SNAKE_CASE =generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_UpperCAmelCase ,_UpperCAmelCase):
gen_token_dict[task].append(_UpperCAmelCase)
_SCREAMING_SNAKE_CASE =[[] for _ in range(_UpperCAmelCase)]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_SCREAMING_SNAKE_CASE =tokenizer.decode(_UpperCAmelCase ,skip_special_tokens=_UpperCAmelCase ,clean_up_tokenization_spaces=_UpperCAmelCase)
code_gens[task].append(remove_last_block(_UpperCAmelCase))
return code_gens
def lowerCamelCase( ):
# Setup configuration
_SCREAMING_SNAKE_CASE =HfArgumentParser(_UpperCAmelCase)
_SCREAMING_SNAKE_CASE =parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_SCREAMING_SNAKE_CASE =args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_SCREAMING_SNAKE_CASE ='''false'''
if args.num_workers is None:
_SCREAMING_SNAKE_CASE =multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_SCREAMING_SNAKE_CASE =Accelerator()
set_seed(args.seed ,device_specific=_UpperCAmelCase)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE =tokenizer.eos_token
_SCREAMING_SNAKE_CASE =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
# Generation settings
_SCREAMING_SNAKE_CASE ={
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 ,_UpperCAmelCase ,_UpperCAmelCase)]),
}
# Load evaluation dataset and metric
_SCREAMING_SNAKE_CASE =load_dataset('''openai_humaneval''')
_SCREAMING_SNAKE_CASE =load_metric('''code_eval''')
_SCREAMING_SNAKE_CASE =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
_SCREAMING_SNAKE_CASE =args.n_samples // args.batch_size
_SCREAMING_SNAKE_CASE =TokenizedDataset(_UpperCAmelCase ,human_eval['''test'''] ,n_copies=_UpperCAmelCase ,n_tasks=_UpperCAmelCase)
# do not confuse args.batch_size, which is actually the num_return_sequences
_SCREAMING_SNAKE_CASE =DataLoader(_UpperCAmelCase ,batch_size=1)
# Run a quick test to see if code evaluation is enabled
try:
_SCREAMING_SNAKE_CASE =code_eval_metric.compute(references=[''''''] ,predictions=[['''''']])
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''')
raise exception
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(_UpperCAmelCase ,_UpperCAmelCase)
_SCREAMING_SNAKE_CASE =complete_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,n_tasks=_UpperCAmelCase ,batch_size=args.batch_size ,**_UpperCAmelCase ,)
if accelerator.is_main_process:
_SCREAMING_SNAKE_CASE =[]
for task in tqdm(range(_UpperCAmelCase)):
_SCREAMING_SNAKE_CASE =human_eval['''test'''][task]['''test''']
_SCREAMING_SNAKE_CASE =f"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point)
# Evaluate completions with "code_eval" metric
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =code_eval_metric.compute(
references=_UpperCAmelCase ,predictions=_UpperCAmelCase ,num_workers=args.num_workers)
print(f"Results: {pass_at_k}")
# Save results to json file
with open(args.output_file ,'''w''') as fp:
json.dump(_UpperCAmelCase ,_UpperCAmelCase)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| code_codestyle: 716 |
from collections.abc import Generator
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =0, 1
while True:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =b, a + b
yield b
def lowerCamelCase( a__ = 1000):
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =fibonacci_generator()
while len(str(next(a__))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
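The snippet above solves Project Euler problem 25: find the index of the first Fibonacci term with n digits (1000 by default). With the generator's bindings renamed, the counting loop is hard to follow, so here is an equivalent sketch with the names spelled out:

```python
from collections.abc import Generator

def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b

def first_index_with_n_digits(n: int = 1000) -> int:
    index = 1
    fib = fibonacci_generator()
    while len(str(next(fib))) < n:
        index += 1
    return index + 1

assert first_index_with_n_digits(3) == 12  # F(12) = 144 is the first 3-digit term
```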
| style_context_codestyle: 191 | label: 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_lowerCamelCase : int = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
A__ = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
A__ = getattr(lowercase_ , lowercase_ ).shape
else:
A__ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
A__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(lowercase_ )[0].split('''.''' )[-2]
A__ = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
A__ = '''weight_g'''
elif "weight_v" in name:
A__ = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
A__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ = '''weight'''
else:
A__ = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = full_name.split('''conv_layers.''' )[-1]
A__ = name.split('''.''' )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None ) -> List[Any]:
"""simple docstring"""
A__ = torch.load(lowercase_ )
A__ = WavLMConfigOrig(checkpoint['''cfg'''] )
A__ = WavLMOrig(lowercase_ )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
A__ = WavLMConfig.from_pretrained(lowercase_ )
else:
A__ = WavLMConfig()
A__ = WavLMModel(lowercase_ )
recursively_load_weights(lowercase_ , lowercase_ )
hf_wavlm.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| code_codestyle: 87 |
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
raise TypeError("Input value must be an 'int' type" )
lowercase__ : str = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
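The function above finds the position of the most significant set bit by shifting right until the value reaches zero; for non-negative integers this is exactly Python's built-in `int.bit_length()`. A short sketch making the equivalence explicit:

```python
def msb_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

# For non-negative integers this agrees with the built-in:
assert all(msb_position(n) == n.bit_length() for n in range(1024))
```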
| style_context_codestyle: 397 | label: 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = """transfo-xl"""
_a = ["""mems"""]
_a = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self, _lowercase=267735, _lowercase=[20000, 40000, 200000], _lowercase=1024, _lowercase=1024, _lowercase=16, _lowercase=64, _lowercase=4096, _lowercase=4, _lowercase=False, _lowercase=18, _lowercase=1600, _lowercase=1000, _lowercase=True, _lowercase=True, _lowercase=0, _lowercase=-1, _lowercase=True, _lowercase=0.1, _lowercase=0.0, _lowercase=True, _lowercase="normal", _lowercase=0.01, _lowercase=0.01, _lowercase=0.02, _lowercase=1E-5, _lowercase=0, **_lowercase, ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = []
self.cutoffs.extend(_lowercase )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE_ = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE_ = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = d_embed
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = div_val
SCREAMING_SNAKE_CASE_ = pre_lnorm
SCREAMING_SNAKE_CASE_ = n_layer
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = mem_len
SCREAMING_SNAKE_CASE_ = same_length
SCREAMING_SNAKE_CASE_ = attn_type
SCREAMING_SNAKE_CASE_ = clamp_len
SCREAMING_SNAKE_CASE_ = sample_softmax
SCREAMING_SNAKE_CASE_ = adaptive
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = dropatt
SCREAMING_SNAKE_CASE_ = untie_r
SCREAMING_SNAKE_CASE_ = init
SCREAMING_SNAKE_CASE_ = init_range
SCREAMING_SNAKE_CASE_ = proj_init_std
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = layer_norm_epsilon
super().__init__(eos_token_id=_lowercase, **_lowercase )
@property
def a__ ( self ) -> Any:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def a__ ( self, _lowercase ) -> List[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| code_codestyle: 238 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' ,[None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' ,['default', 0, 100 * 2**20, 900 * 2**20] )
def _UpperCamelCase ( lowerCAmelCase__: Union[str, Any] ,lowerCAmelCase__: Optional[int] ,lowerCAmelCase__: List[Any] ) -> List[str]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,'IN_MEMORY_MAX_SIZE' ,lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = is_small_dataset(lowerCAmelCase__ )
assert result == expected
| style_context_codestyle: 238 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[str] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['''MobileNetV2FeatureExtractor''']
UpperCAmelCase_ : List[Any] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 491 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__A : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __A ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, ) -> Union[str, Any]:
'''simple docstring'''
import pyspark
def generate_fn():
lowerCAmelCase : str = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCAmelCase : List[Any] = df_with_partition_id.select('*' ).where(f"part_id = {partition_id}" ).drop('part_id' )
lowerCAmelCase : Optional[Any] = partition_df.collect()
lowerCAmelCase : Optional[Any] = 0
for row in rows:
yield f"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class __A ( _BaseExamplesIterable ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : "pyspark.sql.DataFrame" , UpperCAmelCase_ : List[Any]=None , ):
lowerCAmelCase : Optional[Any] = df
lowerCAmelCase : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCAmelCase : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Union[str, Any] ):
yield from self.generate_examples_fn()
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : np.random.Generator ):
lowerCAmelCase : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
lowerCAmelCase : List[Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ , UpperCAmelCase_ )
return SparkExamplesIterable(self.df , partition_order=UpperCAmelCase_ )
@property
def lowercase__ ( self : Optional[Any] ):
return len(self.partition_order )
class __A ( datasets.DatasetBuilder ):
lowerCAmelCase_ : Any = SparkConfig
def __init__( self : List[Any] , UpperCAmelCase_ : "pyspark.sql.DataFrame" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : str = None , **UpperCAmelCase_ : Optional[Any] , ):
import pyspark
lowerCAmelCase : Tuple = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCAmelCase : Dict = df
lowerCAmelCase : Dict = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ , config_name=str(self.df.semanticHash() ) , **UpperCAmelCase_ , )
def lowercase__ ( self : int ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ : Union[str, Any] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=UpperCAmelCase_ )
lowerCAmelCase : Any = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCAmelCase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def lowercase__ ( self : Optional[int] ):
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ : Tuple ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCAmelCase : int = self.df.count()
lowerCAmelCase : str = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCAmelCase : Tuple = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCAmelCase : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCAmelCase : List[str] = min(UpperCAmelCase_ , int(approx_total_size / max_shard_size ) )
lowerCAmelCase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowercase__ ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int , ):
import pyspark
lowerCAmelCase : Tuple = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCAmelCase : int = os.path.join(self._working_dir , os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
lowerCAmelCase : str = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCAmelCase : int = self.config.features
lowerCAmelCase : Union[str, Any] = self._writer_batch_size
lowerCAmelCase : Tuple = self._fs.storage_options
def write_arrow(UpperCAmelCase_ : int ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCAmelCase : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCAmelCase : str = next(UpperCAmelCase_ , UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
lowerCAmelCase : str = 0
lowerCAmelCase : Union[str, Any] = writer_class(
features=UpperCAmelCase_ , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=UpperCAmelCase_ , storage_options=UpperCAmelCase_ , embed_local_files=UpperCAmelCase_ , )
lowerCAmelCase : Union[str, Any] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCAmelCase , lowerCAmelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
lowerCAmelCase : Tuple = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , writer_batch_size=UpperCAmelCase_ , storage_options=UpperCAmelCase_ , embed_local_files=UpperCAmelCase_ , )
lowerCAmelCase : str = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
lowerCAmelCase , lowerCAmelCase : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
lowerCAmelCase : Optional[int] = os.path.join(os.path.dirname(UpperCAmelCase_ ) , os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = (
self.df.mapInArrow(UpperCAmelCase_ , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowercase__ ( self : int , UpperCAmelCase_ : "datasets.SplitGenerator" , UpperCAmelCase_ : str = "arrow" , UpperCAmelCase_ : Optional[Union[str, int]] = None , UpperCAmelCase_ : Optional[int] = None , **UpperCAmelCase_ : str , ):
self._validate_cache_dir()
lowerCAmelCase : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = not is_remote_filesystem(self._fs )
lowerCAmelCase : Union[str, Any] = os.path.join if is_local else posixpath.join
lowerCAmelCase : List[Any] = '-TTTTT-SSSSS-of-NNNNN'
lowerCAmelCase : Optional[Any] = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
lowerCAmelCase : int = path_join(self._output_dir , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Dict = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : List[Any] = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : Dict = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
lowerCAmelCase : Dict = total_num_examples
lowerCAmelCase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards." )
if total_shards > 1:
lowerCAmelCase : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCAmelCase : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ):
rename(
UpperCAmelCase_ , fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , fpath.replace('TTTTT-SSSSS' , f"{global_shard_id:05d}" ).replace('NNNNN' , f"{total_shards:05d}" ) , )
lowerCAmelCase : int = []
lowerCAmelCase : List[Any] = 0
for i in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase , lowerCAmelCase : Dict = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ , len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
lowerCAmelCase : int = 0
lowerCAmelCase : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f"{shard_id:05d}" ).replace('TTTTT' , f"{task_id:05d}" ) , fpath.replace(UpperCAmelCase_ , '' ) , )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
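Under the renaming, the class above matches the shape of the Spark dataset builder in the `datasets` library, which backs `Dataset.from_spark`. A hedged usage sketch, assuming a recent `datasets` release (which provides `from_spark`) and a local Spark session:

```python
# Sketch only: assumes pyspark and a datasets release that ships Dataset.from_spark.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello'}
```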
| style_context_codestyle: 343 | label: 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowercase : int =logging.get_logger("transformers.models.speecht5")
def __UpperCAmelCase ( UpperCamelCase__ :List[str] , UpperCamelCase__ :int , UpperCamelCase__ :Dict ) -> Optional[int]:
hf_model.apply_weight_norm()
snake_case__ : Optional[Any] = checkpoint['''input_conv.weight_g''']
snake_case__ : Optional[int] = checkpoint['''input_conv.weight_v''']
snake_case__ : Union[str, Any] = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
snake_case__ : Optional[int] = checkpoint[F'''upsamples.{i}.1.weight_g''']
snake_case__ : List[str] = checkpoint[F'''upsamples.{i}.1.weight_v''']
snake_case__ : List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case__ : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
snake_case__ : Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
snake_case__ : Union[str, Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
snake_case__ : Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
snake_case__ : Dict = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
snake_case__ : Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
snake_case__ : int = checkpoint['''output_conv.1.weight_g''']
snake_case__ : str = checkpoint['''output_conv.1.weight_v''']
snake_case__ : Optional[Any] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCAmelCase ( UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Dict , UpperCamelCase__ :Any , UpperCamelCase__ :Optional[int]=None , UpperCamelCase__ :int=None , ) -> Tuple:
if config_path is not None:
snake_case__ : List[Any] = SpeechTaHifiGanConfig.from_pretrained(UpperCamelCase__ )
else:
snake_case__ : Any = SpeechTaHifiGanConfig()
snake_case__ : Dict = SpeechTaHifiGan(UpperCamelCase__ )
snake_case__ : Any = torch.load(UpperCamelCase__ )
load_weights(orig_checkpoint['''model''']['''generator'''] , UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Optional[Any] = np.load(UpperCamelCase__ )
snake_case__ : Tuple = stats[0].reshape(-1 )
snake_case__ : Dict = stats[1].reshape(-1 )
snake_case__ : int = torch.from_numpy(UpperCamelCase__ ).float()
snake_case__ : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ).float()
model.save_pretrained(UpperCamelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
_lowercase : List[str] =argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowercase : str =parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| code_codestyle: 574 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowercase : Tuple ={
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def __UpperCAmelCase ( UpperCamelCase__ :List[str] , UpperCamelCase__ :Any , UpperCamelCase__ :int , UpperCamelCase__ :Dict=None ) -> int:
# Initialise PyTorch model
snake_case__ : List[Any] = XLNetConfig.from_json_file(UpperCamelCase__ )
snake_case__ : Optional[Any] = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
snake_case__ : Union[str, Any] = finetuning_task
snake_case__ : str = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case__ : List[Any] = XLNetForSequenceClassification(UpperCamelCase__ )
elif "squad" in finetuning_task:
snake_case__ : str = finetuning_task
snake_case__ : List[str] = XLNetForQuestionAnswering(UpperCamelCase__ )
else:
snake_case__ : Tuple = XLNetLMHeadModel(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
snake_case__ : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_lowercase : Optional[Any] =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| style_context_codestyle: 574 | label: 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False
        )
| 697
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 697
| 1
|
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
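# The loop above is O(n); the same result follows in O(1) from the closed forms
# sum(1..n) = n(n + 1)/2 and sum(1^2..n^2) = n(n + 1)(2n + 1)/6. A minimal sketch
# (the function name is illustrative, not part of the original script):
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares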
| 161
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
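# A note on the control flow above: at inference time each highway (early-exit)
# classifier inside DeeBertModel scores its own logits with `entropy`; when a
# layer is confident enough it raises HighwayException, which forward() catches
# in order to return that layer's predictions together with the exit layer index.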
| 161
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 29
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # entries of L strictly below the diagonal
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # entries of U on and above the diagonal
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
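# Illustrative usage of the decomposition above (the matrix is an arbitrary example):
#
#   >>> matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   >>> lower, upper = lower_upper_decomposition(matrix)
#   >>> bool(np.allclose(lower @ upper, matrix))
#   True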
| 275
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None) -> Optional[Any]:
UpperCamelCase__ : int = {}
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Tuple = model.mobilenet_va
else:
UpperCamelCase__ : str = model
UpperCamelCase__ : int = '''MobilenetV1/Conv2d_0/'''
UpperCamelCase__ : Optional[int] = backbone.conv_stem.convolution.weight
UpperCamelCase__ : Dict = backbone.conv_stem.normalization.bias
UpperCamelCase__ : Any = backbone.conv_stem.normalization.weight
UpperCamelCase__ : Optional[int] = backbone.conv_stem.normalization.running_mean
UpperCamelCase__ : int = backbone.conv_stem.normalization.running_var
for i in range(13):
UpperCamelCase__ : Union[str, Any] = i + 1
UpperCamelCase__ : Any = i * 2
UpperCamelCase__ : List[Any] = backbone.layer[pt_index]
UpperCamelCase__ : List[Any] = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
UpperCamelCase__ : Union[str, Any] = pointer.convolution.weight
UpperCamelCase__ : Dict = pointer.normalization.bias
UpperCamelCase__ : Tuple = pointer.normalization.weight
UpperCamelCase__ : int = pointer.normalization.running_mean
UpperCamelCase__ : List[Any] = pointer.normalization.running_var
UpperCamelCase__ : Optional[Any] = backbone.layer[pt_index + 1]
UpperCamelCase__ : Dict = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
UpperCamelCase__ : Any = pointer.convolution.weight
UpperCamelCase__ : Dict = pointer.normalization.bias
UpperCamelCase__ : Optional[int] = pointer.normalization.weight
UpperCamelCase__ : int = pointer.normalization.running_mean
UpperCamelCase__ : Dict = pointer.normalization.running_var
if isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
UpperCamelCase__ : Optional[int] = model.classifier.weight
UpperCamelCase__ : Optional[int] = model.classifier.bias
return tf_to_pt_map
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.')
raise
# Load weights from TF model
UpperCamelCase__ : str = tf.train.list_variables(lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = {}
for name, shape in init_vars:
logger.info(f'Loading TF weight {name} with shape {shape}')
UpperCamelCase__ : Optional[Any] = tf.train.load_variable(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = array
# Build TF to PyTorch weights loading map
UpperCamelCase__ : List[str] = _build_tf_to_pytorch_map(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for name, pointer in tf_to_pt_map.items():
logger.info(f'Importing {name}')
if name not in tf_weights:
logger.info(f'{name} not in tf pre-trained weights, skipping')
continue
UpperCamelCase__ : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise')
UpperCamelCase__ : List[str] = np.transpose(lowerCamelCase_ , (2, 3, 0, 1))
elif "weights" in name:
logger.info('Transposing')
if len(pointer.shape) == 2: # copying into linear layer
UpperCamelCase__ : Any = array.squeeze().transpose()
else:
UpperCamelCase__ : Optional[Any] = np.transpose(lowerCamelCase_ , (3, 2, 0, 1))
if pointer.shape != array.shape:
raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')
logger.info(f'Initialize PyTorch weight {name} {array.shape}')
UpperCamelCase__ : Optional[int] = torch.from_numpy(lowerCamelCase_)
tf_weights.pop(lowerCamelCase_ , lowerCamelCase_)
tf_weights.pop(name + '/RMSProp' , lowerCamelCase_)
tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase_)
tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase_)
logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}')
return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
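# Worked example of the "SAME" padding arithmetic above (illustrative numbers):
# with in_height = 7, stride_height = 2 and kernel_height = 3, the input size is
# not a multiple of the stride, so pad_along_height = max(3 - (7 % 2), 0) = 2,
# split as pad_top = 1 and pad_bottom = 1, matching TensorFlow's behaviour.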
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class __lowercase (lowerCamelCase__ ):
_lowerCamelCase = MobileNetVaConfig
_lowerCamelCase = load_tf_weights_in_mobilenet_va
_lowerCamelCase = '''mobilenet_v1'''
_lowerCamelCase = '''pixel_values'''
_lowerCamelCase = False
    def _init_weights(self, module) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase__ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase__ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , lowerCamelCase__ , )
class __lowercase (lowerCamelCase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = True):
super().__init__(__lowerCamelCase)
UpperCamelCase__ : str = config
UpperCamelCase__ : List[Any] = 32
UpperCamelCase__ : Dict = max(int(depth * config.depth_multiplier) , config.min_depth)
UpperCamelCase__ : List[Any] = MobileNetVaConvLayer(
__lowerCamelCase , in_channels=config.num_channels , out_channels=__lowerCamelCase , kernel_size=3 , stride=2 , )
UpperCamelCase__ : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase__ : str = nn.ModuleList()
for i in range(13):
UpperCamelCase__ : Optional[int] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase__ : Optional[Any] = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCamelCase , ))
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase , in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , kernel_size=1 , ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Tuple = None , UpperCAmelCase_ : Union[str, Any] = None , UpperCAmelCase_ : List[str] = None , ):
UpperCamelCase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
UpperCamelCase__ : Any = self.conv_stem(__lowerCamelCase)
UpperCamelCase__ : int = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
UpperCamelCase__ : Any = layer_module(__lowerCamelCase)
if output_hidden_states:
UpperCamelCase__ : Union[str, Any] = all_hidden_states + (hidden_states,)
UpperCamelCase__ : List[str] = hidden_states
if self.pooler is not None:
UpperCamelCase__ : List[Any] = torch.flatten(self.pooler(__lowerCamelCase) , start_dim=1)
else:
UpperCamelCase__ : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=__lowerCamelCase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase__ , )
class __lowercase (lowerCamelCase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : str):
super().__init__(__lowerCamelCase)
UpperCamelCase__ : List[str] = config.num_labels
UpperCamelCase__ : Optional[Any] = MobileNetVaModel(__lowerCamelCase)
UpperCamelCase__ : Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase__ : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCamelCase)
UpperCamelCase__ : Dict = nn.Linear(__lowerCamelCase , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Dict = None , ):
UpperCamelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Dict = self.mobilenet_va(__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase)
UpperCamelCase__ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase__ : Optional[int] = self.classifier(self.dropout(__lowerCamelCase))
UpperCamelCase__ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase__ : Union[str, Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase__ : Optional[int] = '''single_label_classification'''
else:
UpperCamelCase__ : Dict = '''multi_label_classification'''
if self.config.problem_type == "regression":
UpperCamelCase__ : int = MSELoss()
if self.num_labels == 1:
UpperCamelCase__ : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze())
else:
UpperCamelCase__ : Any = loss_fct(__lowerCamelCase , __lowerCamelCase)
elif self.config.problem_type == "single_label_classification":
UpperCamelCase__ : List[str] = CrossEntropyLoss()
UpperCamelCase__ : Dict = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase__ : Optional[int] = BCEWithLogitsLoss()
UpperCamelCase__ : Dict = loss_fct(__lowerCamelCase , __lowerCamelCase)
if not return_dict:
UpperCamelCase__ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states , )
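# A minimal inference sketch for the classification head above, assuming the
# published transformers API and Hub checkpoint names (`image` is a PIL image
# you supply yourself):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])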
| 701
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
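# With the OnnxConfig above registered, the (legacy) ONNX export CLI can be used;
# an illustrative invocation:
#
#   python -m transformers.onnx --model=nvidia/segformer-b0-finetuned-ade-512-512 onnx_output/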
| 6
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 213
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Dict = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 'wav2vec2'
def __init__( self : Any , _lowerCAmelCase : Optional[Any]=3_2 , _lowerCAmelCase : List[Any]=7_6_8 , _lowerCAmelCase : Dict=1_2 , _lowerCAmelCase : Tuple=1_2 , _lowerCAmelCase : Optional[int]=3_0_7_2 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Any=1e-5 , _lowerCAmelCase : Any="group" , _lowerCAmelCase : Union[str, Any]="gelu" , _lowerCAmelCase : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCAmelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase : int=False , _lowerCAmelCase : int=1_2_8 , _lowerCAmelCase : Union[str, Any]=1_6 , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=0.05 , _lowerCAmelCase : List[str]=1_0 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Union[str, Any]=1_0 , _lowerCAmelCase : int=0 , _lowerCAmelCase : Optional[int]=3_2_0 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Optional[Any]=1_0_0 , _lowerCAmelCase : Tuple=2_5_6 , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[Any]="sum" , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : List[str]=2_5_6 , _lowerCAmelCase : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCAmelCase : List[str]=(5, 3, 3, 1, 1) , _lowerCAmelCase : Union[str, Any]=(1, 2, 3, 1, 1) , _lowerCAmelCase : Optional[Any]=5_1_2 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Any=3 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=None , **_lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(_lowerCAmelCase )
snake_case_ = list(_lowerCAmelCase )
snake_case_ = list(_lowerCAmelCase )
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = feat_quantizer_dropout
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
snake_case_ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(_lowerCAmelCase )
snake_case_ = list(_lowerCAmelCase )
snake_case_ = list(_lowerCAmelCase )
snake_case_ = xvector_output_dim
@property
def lowerCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
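# Worked example for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. the feature encoder
# emits one frame per 320 input samples (20 ms of audio at 16 kHz).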
| 283
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        # put the larger of the two sub-maxima at the end
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
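# Illustrative usage of the in-place sort above:
#
#   >>> data = [5, 3, 8, 1, 9, 2]
#   >>> slowsort(data)
#   >>> data
#   [1, 2, 3, 5, 8, 9]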
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
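# With the lazy structure above, e.g. `from transformers.models.roberta import
# RobertaModel` only triggers the import of the torch modeling file on first
# attribute access, so `import transformers` stays cheap even when optional
# backends (torch, TF, Flax) are not installed.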
| 356
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : str=13 , _lowercase : Dict=7 , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=False , _lowercase : str=99 , _lowercase : List[str]=16 , _lowercase : List[str]=2 , _lowercase : str=4 , _lowercase : Optional[Any]=4 , _lowercase : List[Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : List[Any]=0.1 , _lowercase : List[Any]=32 , _lowercase : List[str]=2 , _lowercase : Optional[Any]=1 , _lowercase : Any=0 , _lowercase : Tuple=0.02 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = initializer_range
def a ( self : List[str] ):
__UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCAmelCase = shift_tokens_right(_lowercase , 1 , 2 )
__UpperCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , )
__UpperCAmelCase = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
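
# Explanatory note (not from the original test file): the two cache checks
# above exercise Flax incremental decoding. `init_cache` pre-allocates
# `past_key_values` for `max_decoder_length` positions, the first `decode`
# call fills the cache for all but the last target token, and the second call
# decodes only the final token against that cache; agreement with a full
# uncached decode to within 1e-3 on a few logits is the correctness criterion.
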
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
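
# Illustrative sketch (an assumption, not the library implementation): the
# `shift_tokens_right` helper used above rotates the target sequence one step
# to the right and writes `decoder_start_token_id` into the first slot, which
# is what test_shift_tokens_right asserts.
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label-masking convention: -100 positions fall back to the pad token
    return np.where(shifted == -100, pad_token_id, shifted)
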
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="deis", solver_order=order, solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
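
# Illustrative usage sketch (assumptions: the 0.1 * sample expression stands
# in for a real denoiser; only scheduler API already exercised by the tests
# above is used).
def _deis_denoising_loop_sketch():
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)  # toy "noisy" sample
    for t in scheduler.timesteps:
        model_output = 0.1 * sample  # stand-in for a denoiser's prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample
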
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
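
# Quick usage sketch of the helper under test (mirrors the cases asserted
# above; illustrative only): with `out_features=None`, the aligned features
# are read off the stage names from the given indices.
#
#     get_aligned_output_features_output_indices(None, [0, 2], ["a", "b", "c"])
#     # -> (["a", "c"], [0, 2])
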
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords):
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state, char):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self):
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string):
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
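
# Illustrative usage of the Aho-Corasick automaton above: each keyword maps to
# the start indices of its occurrences in the searched string.
#
#     matcher = Automaton(["what", "hat", "ver", "er"])
#     matcher.search_in("whatever")
#     # -> {'what': [0], 'hat': [1], 'ver': [5], 'er': [6]}
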
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum of a subsequence of the given numbers.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
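
# Worked example: for [1, -2, 3, 4, -1] the running best is 1 -> 1 -> 4 -> 8 -> 8,
# so max_subsequence_sum returns 8 (take 1, skip -2, take 3 and 4, skip -1).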
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
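
# Context sketch (illustrative, with the toy sizes used above): a Funnel
# configuration pools the sequence across `block_sizes` encoder blocks, and
# `num_decoder_layers` layers upsample back to full length for token-level
# heads; that is why the base model above outputs only 2-3 positions while
# the full model restores `seq_length`.
#
#     FunnelConfig(vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8)
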
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
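
# Example invocation (file names are the script's own defaults; the script
# name itself is shown only for illustration):
#   python token_counts.py \
#       --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#       --vocab_size 30522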
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
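
# Example invocation (paths are illustrative; the script file name is an
# assumption):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json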
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, embed_dim=3, is_training=True, use_labels=True, hidden_size=128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers=7, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, encoder_stride=2, num_attention_outputs=1, dim=128, depths=[2, 2, 2, 2], resolution=2, mlp_expansion_ratio=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)
        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length])
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193
| 0
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
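# Illustrative check (added for clarity, not part of the original module): the
# max-shift in softmax() keeps np.exp from overflowing on large logits while
# leaving the result unchanged. For a single row of logits:
# >>> softmax(np.array([[2.0, 1.0, 0.1]])).round(3)
# array([[0.659, 0.242, 0.099]])
# Each row sums to 1; sigmoid() is used instead for multi-label heads.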
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.', UserWarning, )
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['function_to_apply'] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
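# Hypothetical usage sketch (added for illustration; the default model is
# chosen by the factory, not by this file): the class above is normally built
# via the pipeline factory, and `top_k=None` returns every label's score.
#
# from transformers import pipeline
# classifier = pipeline("text-classification")
# classifier("This is great!", top_k=None)
# # -> [{'label': 'POSITIVE', 'score': 0.99...}, {'label': 'NEGATIVE', 'score': 0.00...}]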
| 702
|
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="""Mask2Former does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="""Mask2Former is not a generative model""")
    def test_generate_without_input_ids(self):
        pass
    @unittest.skip(reason="""Mask2Former does not use token embeddings""")
    def test_resize_tokens_embeddings(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size), device=torch_device),
            """mask_labels""": torch.randn((2, 10, *size), device=torch_device),
            """class_labels""": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="""pt""", )
        inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device)
        inputs["""mask_labels"""] = [el.to(torch_device) for el in inputs["""mask_labels"""]]
        inputs["""class_labels"""] = [el.to(torch_device) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 65
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"], )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 65
| 1
|
from __future__ import annotations
def prime_sieve(limit):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling=1_000_000):
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'{solution() = }')
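# Quick sanity checks (added for illustration, not part of the original script):
# prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# solution(100) -> 41, since 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest run of
# consecutive primes that sums to a prime below 100.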
| 607
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('RGB')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print('add noise to latents at timestep', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(self, image=None, strength=0.8, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, use_clipped_model_output=None, output_type="pil", return_dict=True) -> Union[ImagePipelineOutput, Tuple]:
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
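# Hypothetical usage sketch (checkpoint name assumed for illustration only):
# pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-cifar10-32")
# image, timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)
# A smaller `strength` keeps more of the input: only the last
# int(num_inference_steps * strength) timesteps are noised and re-denoised.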
| 607
| 1
|
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    """jukebox""": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs, ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
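# Illustrative call shape (example values assumed, not taken from this file):
# tokenizer("Alan Jackson", "Country Rock", "old town road") returns a
# BatchEncoding whose "input_ids" holds one tensor per model prior
# (v3, v2, v2), each laid out as [0, 0, 0, artist_id, *genre_ids, *lyric_ids],
# where the three leading zeros are metadata placeholders.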
| 378
|
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)
def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
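# Illustrative trace (added for clarity): move_tower(2, "A", "B", "C") prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B
# and a tower of height n always takes 2**n - 1 moves.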
| 378
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height) -> int:
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if not scores:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
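# Worked example (added for illustration): with the scores above the tree has
# height log2(8) = 3; the maximizer can guarantee
# max(min(90, 33), min(65, 34423)) = 65, so main() prints "Optimal value : 65".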
| 161
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: non-positive -> background, positive -> mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
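# A minimal usage sketch (hypothetical: assumes the transformers agent tooling is
# available and the CLIPSeg checkpoint can be downloaded; `my_image` is any PIL
# image supplied by the caller):
#
#     tool = ImageSegmentationTool()
#     mask = tool(image=my_image, label="a cat")  # binary PIL mask for "a cat"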
| 55
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
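# The `_LazyModule` registered above defers importing heavy submodules (e.g. the
# torch-backed modeling file) until an attribute is first accessed. A rough,
# library-independent sketch of the idea (illustrative names only, not the
# actual transformers implementation):
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported name to the submodule that defines it
#             self._name_to_submodule = {v: k for k, vs in import_structure.items() for v in vs}
#
#         def __getattr__(self, item):
#             submodule = importlib.import_module("." + self._name_to_submodule[item], self.__name__)
#             return getattr(submodule, item)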
| 287
| 0
|
def kth_permutation(k, n):
    """
    Find the k'th lexicographic permutation of 0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143
|
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """
    >>> backtrack(13, 2, 1, 0, 0)
    (0, 1)
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    """
    >>> solve(13, 2)
    1
    """
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
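# Worked example: solve(13, 2) == 1 because 13 = 2**2 + 3**2 is the only way to
# write 13 as a sum of squares of distinct natural numbers (1 + 4 + 9 = 14
# overshoots, and no smaller combination of {1, 4, 9, ...} lands on 13 exactly).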
| 143
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer for HerBERT, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
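# For reference, the special-token layout produced by the methods above
# (HerBERT uses <s> as the CLS token and </s> as the SEP token):
#
#     single sequence:    <s> A </s>
#     pair of sequences:  <s> A </s> B </s>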
| 304
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Fast BART tokenizer (backed by HuggingFace's `tokenizers` library), using byte-level BPE.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
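# Resulting special-token layout (RoBERTa-style):
#
#     single sequence:    <s> A </s>
#     pair of sequences:  <s> A </s></s> B </s>
#
# BART does not use token type ids, which is why
# create_token_type_ids_from_sequences returns zeros for every position.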
| 276
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
| 1
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
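# A note on numerical stability: for large negative inputs, np.exp(-vector)
# overflows to inf (emitting a RuntimeWarning) even though the result rounds
# correctly to 0. A piecewise-stable sketch that avoids exponentiating large
# magnitudes (a common alternative, not part of the original snippet):
#
#     def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
#         out = np.empty_like(vector, dtype=float)
#         pos = vector >= 0
#         out[pos] = 1 / (1 + np.exp(-vector[pos]))
#         exp_v = np.exp(vector[~pos])
#         out[~pos] = exp_v / (1 + exp_v)
#         return out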
| 638
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 225
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
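# For reference, the NER/Chunk readers above expect CoNLL-2003-style input: one
# whitespace-separated token per line with its label in the relevant column, and
# a blank line (or a -DOCSTART- marker) between sentences, e.g.:
#
#     U.N. NNP I-NP I-ORG
#     official NN I-NP O
#     Ekeus NNP I-NP I-PER
#
# The POS reader instead consumes CoNLL-U files via `conllu.parse_incr`.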
| 488
| 1
|
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    """Run the DFS from the root (node 1) to populate `cuts`."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
print(len(cuts) - 1)
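# Why len(cuts) - 1: dfs() appends every vertex whose subtree has an even number
# of nodes, including the root (whose "subtree" is the whole 10-node tree). Each
# non-root entry marks an edge to its parent that can be removed while keeping
# all components even-sized; the root contributes no edge, hence the -1. For the
# sample tree the removable edges are (1, 3) and (1, 6), so the script prints 2.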
| 339
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
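# A note on the gradient-accumulation arithmetic in training_function: with the
# default config (batch_size=16) nothing changes, but a requested batch_size of,
# say, 64 on a GPU is split into micro-batches of MAX_GPU_BATCH_SIZE=16, and the
# optimizer only steps every 64 // 16 = 4 micro-batches, so the effective
# per-process batch size is still 64:
#
#     gradient_accumulation_steps = 64 // 16  # -> 4
#     effective_batch_size = 16 * 4           # -> 64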
| 339
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the interval [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum on the interval [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 720
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst` by divide and conquer in O(log n) time.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 457
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 97
|
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
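# A minimal usage sketch for deprecate() (hypothetical call sites; `scale` is an
# illustrative kwarg name, not a real diffusers argument):
#
#     def my_function(**kwargs):
#         # pops `scale` from kwargs, emits a FutureWarning, and returns its value
#         scale = deprecate("scale", "0.30.0", "Use `gamma` instead.", take_from=kwargs)
#
#     deprecate("old_flag", "0.30.0", "`old_flag` has no effect.")  # warn only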
| 24
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
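# Added illustration (not part of the original script) of the three matching modes
# used by `should_ignore` with the ignore lists defined above:
#   should_ignore("encoder.version", IGNORE_KEYS)                 -> True (plain substring match)
#   should_ignore("text_encoder_prenet.encoder_prenet.0.weight",
#                 IGNORE_KEYS_S2T)                                -> True (trailing ".*" prefix match)
#   should_ignore("encoder.layers.3.norm_k.weight", IGNORE_KEYS)  -> True (".*." prefix/suffix match)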
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
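# Example invocation (added; assuming this file is saved as
# convert_speecht5_original_pytorch_checkpoint_to_pytorch.py -- adjust the paths to your setup):
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task s2t \
#       --checkpoint_path /path/to/speecht5_asr.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_s2t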
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
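# Added illustration (not part of the original module): `get_pairs` returns the set of
# adjacent symbol bigrams; `LEDTokenizer.bpe` below repeatedly merges the lowest-ranked
# of these pairs until no ranked pair remains.
if __name__ == "__main__":
    print(get_pairs(("l", "o", "w", "e", "r")))  # {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}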
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
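# Usage sketch (added; not part of the original file). It assumes network access to the
# "allenai/led-base-16384" checkpoint and that `PreTrainedTokenizerBase.pad` forwards
# extra keys to the `_pad` override above, which is what the override is written for:
# `global_attention_mask` is padded with -1 so padded positions keep local attention.
if __name__ == "__main__":
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    encoded = tokenizer(["short text", "a somewhat longer piece of text"])
    # mark the first token of each sequence for global attention
    encoded["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in encoded["input_ids"]]
    padded = tokenizer.pad(encoded, padding="longest")
    print(padded["global_attention_mask"])  # the shorter sequence is right-padded with -1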
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
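# Quick check (added; not part of the original metric file): `simple_accuracy` relies on
# calling `.mean()` on the elementwise comparison, so it expects NumPy arrays rather than
# plain lists -- which is also why the metric above is declared with format="numpy".
if __name__ == "__main__":
    import numpy as np

    print(simple_accuracy(np.array([0, 1, 2]), np.array([0, 1, 1])))  # 0.666...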
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
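# Usage sketch (added): with the defaults above, embed_dim=96 doubles at each of the
# four stages, so the derived channel dimension is 96 * 2 ** 3 = 768.
if __name__ == "__main__":
    config = Swinv2Config()
    print(config.num_layers)   # 4
    print(config.hidden_size)  # 768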
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
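# Note (added): at import time only the lightweight `_import_structure` dict above is
# built; `_LazyModule` defers the heavy torch/TF/flax submodule imports until an
# attribute such as `ElectraModel` is first accessed.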
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
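# Note (added): timm stores query/key/value as one fused projection of shape
# (3 * hidden_size, hidden_size); the slices above carve it into the three
# (hidden_size, hidden_size) matrices in q, k, v order, and likewise for the bias.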
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
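# Example invocation (added; assuming this file is saved as
# convert_vit_hybrid_timm_to_pytorch.py):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384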
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires a header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches
            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=header_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # input images are converted to RGB, so only 3 of the 4 channels survive
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
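# Note (added): `expected_hidden_dim` is patch_height * patch_width * num_channels + 2;
# each flattened patch carries its pixel values plus two extra slots for the patch's
# (row, column) coordinates. With the defaults above: 16 * 16 * 3 + 2 = 770.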
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # the patch embedding applies 4 stride-2 convolutions, so the spatial size shrinks 4 times
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
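
# The suite below plugs LevitModelTester into the shared ModelTesterMixin/PipelineTesterMixin
# machinery; the flags and skips account for Levit specifics (no attentions, no input embeddings).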
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Levit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Levit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='Levit does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [height * width, self.model_tester.hidden_sizes[0]],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == 'LevitForImageClassificationWithTeacher':
                del inputs_dict['labels']

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == 'LevitForImageClassificationWithTeacher'
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == 'LevitForImageClassificationWithTeacher':
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
            {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
            {'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == 'LevitForImageClassificationWithTeacher'
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type['num_labels'] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])

                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if 'Using a target size that is different to the input size' in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
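
# The integration test below runs a pretrained Levit checkpoint on the standard COCO cats image and
# compares the first classification logits against precomputed reference values.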
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
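
# A minimal sketch of what this shim preserves (hypothetical caller code, not part of this module):
# importing through the old path keeps working but surfaces the FutureWarning configured above.
#
#   from diffusers.pipeline_utils import DiffusionPipeline  # emits the 0.22.0 deprecation notice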
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
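
# A minimal sketch of the lazy-import behavior (hypothetical caller code): attribute access triggers
# the real import, so e.g. `from transformers.models.layoutlmv3 import LayoutLMv3Config` only loads
# `configuration_layoutlmv3` at that point; the torch/TF branches stay unimported until first use.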
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'<madeupword{i}>' for i in range(self.num_madeup_words)]

        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [
            word for word in madeup_words if word not in kwargs['additional_special_tokens']
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        sp_size = len(self.sp_model)
        madeup_words_dict = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_dict)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
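
# Usage sketch (assumes a local SentencePiece model file; the path below is hypothetical):
#
#   tokenizer = XGLMTokenizer('sentencepiece.bpe.model')
#   ids = tokenizer('Hello world')['input_ids']   # starts with sep_token_id (</s> = 2), as in fairseq
#   text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))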
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_rescale': self.do_rescale,
            'rescale_factor': self.rescale_factor,
            'do_pad': self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when providing images to the image processor,
        # assuming do_resize is set to True with a scalar size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
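
# Worked example for get_expected_values: with the default {'shortest_edge': 18, 'longest_edge': 1333},
# a 640x480 input is landscape (w > h), so it resizes to height 18 and width int(18 * 640 / 480) = 24.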
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())


@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if 'tmp_path' in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
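
# These tests run under pytest; the two fixtures reset the once-per-session deprecation registry and
# stub out the Hub client so that list_metrics() needs no network access.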
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {'input_ids': input_ids_2, 'mems': mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {'input_ids': input_ids_1, 'labels': lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {'input_ids': input_ids_2, 'mems': mems_1, 'labels': lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': TFTransfoXLModel,
            'text-classification': TFTransfoXLForSequenceClassification,
            'text-generation': TFTransfoXLLMHeadModel,
            'zero-shot': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == 'TextGenerationPipelineTests':
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip('Skip test until #12651 is resolved.')
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
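
# For example, for layer 0 this maps 'blocks.0.norm1.weight' -> 'vit.encoder.layer.0.layernorm_before.weight'
# (the 'vit.' prefix is stripped again when only the base model is converted).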
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
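
# Example invocation (script filename is illustrative; assumes network access for torch.hub and the HF Hub):
#
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16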
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
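A usage sketch that drives the same SpeechT5 checkpoints directly, mirroring what `encode`, `forward` and `decode` do inside the tool; the `soundfile` dependency and the output file name are assumptions:

import torch
import soundfile as sf  # assumed available for writing the waveform to disk
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, this is a test.", return_tensors="pt", truncation=True)
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

sf.write("speech.wav", speech.numpy(), samplerate=16000)  # SpeechT5 outputs 16 kHz audio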
| 265
| 1
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
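The deprecation warning above points at the replacement pattern: pass `text=` in the same `__call__` that receives the audio, and the tokenized text comes back attached as `labels`. A minimal sketch, with the usual M-CTC-T checkpoint id assumed:

import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")

audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="pt")
print(batch.keys())  # the audio features plus "labels", per the __call__ above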
| 474
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply X (NOT) gates to two qubits and measure them, returning the counts histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 445
| 0
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCamelCase ( __a , unittest.TestCase ):
a__ :Dict = XLMProphetNetTokenizer
a__ :List[Any] = False
a__ :str = True
def A_ (self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ : str = XLMProphetNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ (self ) -> str:
UpperCamelCase_ : int = """[PAD]"""
UpperCamelCase_ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def A_ (self ) -> int:
UpperCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__UpperCamelCase ) , 1_012 )
def A_ (self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def A_ (self ) -> List[str]:
UpperCamelCase_ : Dict = XLMProphetNetTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
UpperCamelCase_ : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase_ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCamelCase_ : List[str] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCamelCase_ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def A_ (self ) -> Optional[int]:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def A_ (self ) -> Optional[Any]:
UpperCamelCase_ : List[Any] = """Hello World!"""
UpperCamelCase_ : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def A_ (self ) -> Optional[int]:
# fmt: off
UpperCamelCase_ : Optional[int] = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
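The expected ids in the tests above are shifted by the tokenizer's `fairseq_offset`, which reserves the first vocabulary slots for special tokens such as [PAD] and [CLS]. A toy sketch of that mapping; the offset value here is hypothetical, the real one lives on `tokenizer.fairseq_offset`:

sp_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "▁This ▁is ▁a ▁t est"
fairseq_offset = 10  # hypothetical value for illustration
final_ids = [i + fairseq_offset for i in sp_ids]
print(final_ids)  # what convert_tokens_to_ids would return under this offset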
| 705
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
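A sketch of how such a script is launched and of the observable effect of `even_batches=False`; two processes are assumed, matching the assert in `create_accelerator`:

# shell: accelerate launch --num_processes 2 even_batches_demo.py
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(even_batches=False)
dataloader = accelerator.prepare(DataLoader(TensorDataset(torch.arange(7)), batch_size=2))
for batch in dataloader:
    # process 0 sees batch sizes [2, 2]; process 1 sees [2, 1] instead of a padded duplicate
    print(accelerator.process_index, batch[0].shape)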
| 138
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
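A usage sketch for the combined call path above: passing `audio` and `text` together returns the features with the tokenized text attached as `labels`. The checkpoint and dummy dataset ids are the standard examples, assumed here:

from datasets import load_dataset
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]
batch = processor(
    audio=sample["array"], sampling_rate=sample["sampling_rate"], text=ds[0]["text"], return_tensors="pt"
)
print(batch.keys())  # input_features, attention_mask and labels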
| 234
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
__SCREAMING_SNAKE_CASE =Path(__file__).resolve().parent.parent.parent
__SCREAMING_SNAKE_CASE =repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE =model_name.split("""-""")
__SCREAMING_SNAKE_CASE =model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 234
| 1
|
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Return True if s is a palindrome, checked with two pointers."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Return True if s is a palindrome, traversing half of the string."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Return True if s is a palindrome, peeling the outer characters recursively."""
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Return True if s is a palindrome, using slice reversal."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    """Time one implementation over the whole test_data dictionary."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 708
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = BigBirdTokenizer
__lowerCamelCase : int = BigBirdTokenizerFast
__lowerCamelCase : int = True
__lowerCamelCase : List[Any] = True
def _lowerCAmelCase ( self ):
super().setUp()
A : Optional[int] = self.tokenizer_class(lowerCamelCase__, keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : Optional[int] = """<s>"""
A : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ), lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """[MASK]""" )
self.assertEqual(len(lowerCamelCase__ ), 1004 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 1000 )
def _lowerCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
A : Tuple = self.get_tokenizer()
A : Optional[int] = self.get_rust_tokenizer()
A : Tuple = """I was born in 92000, and this is falsé."""
A : Optional[Any] = tokenizer.tokenize(lowerCamelCase__ )
A : int = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : Tuple = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
A : List[str] = rust_tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
A : Union[str, Any] = self.get_rust_tokenizer()
A : Tuple = tokenizer.encode(lowerCamelCase__ )
A : int = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = BigBirdTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__ )
A : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ), [285, 46, 10, 170, 382], )
A : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
A : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
A : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
@cached_property
def _lowerCAmelCase ( self ):
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def _lowerCAmelCase ( self ):
A : Union[str, Any] = """Hello World!"""
A : Union[str, Any] = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def _lowerCAmelCase ( self ):
A : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
A : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def _lowerCAmelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
A : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
A : Optional[Any] = """ """.join(lowerCamelCase__ )
A : int = self.big_tokenizer.encode_plus(lowerCamelCase__, return_tensors="""pt""", return_token_type_ids=lowerCamelCase__ )
A : int = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence], return_tensors="""pt""", return_token_type_ids=lowerCamelCase__ )
A : Tuple = BigBirdConfig(attention_type="""original_full""" )
A : List[str] = BigBirdModel(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def _lowerCAmelCase ( self ):
A : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
A : Optional[Any] = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : Any = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""google/bigbird-roberta-base""", revision="""215c99f1600e06f83acce68422f2035b2b5c3510""", )
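A runnable sketch of the round trip the `[MASK]` decoding test above asserts; it needs network access to the hub checkpoint:

from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Paris is the [MASK].").input_ids
print(tokenizer.decode(ids))  # "[CLS] Paris is the[MASK].[SEP]"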
| 520
| 0
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    """Read a big-endian uint32 from a bytestream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract MNIST images into a 4-D uint8 array [index, y, x, depth]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels into a 1-D uint8 array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class UpperCAmelCase_ :
"""simple docstring"""
@deprecated(
_UpperCAmelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : str=dtypes.floataa , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=None , ) -> str:
'''simple docstring'''
lowercase , lowercase : Tuple =random_seed.get_seed(_UpperCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase : Optional[int] =dtypes.as_dtype(_UpperCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
lowercase : Dict =1_0000
lowercase : Optional[Any] =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowercase : Tuple =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase : Optional[int] =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase : int =images.astype(numpy.floataa )
lowercase : List[str] =numpy.multiply(_UpperCAmelCase , 1.0 / 2_5_5.0 )
lowercase : Optional[Any] =images
lowercase : Dict =labels
lowercase : Dict =0
lowercase : Tuple =0
@property
def A__ ( self : Dict ) -> int:
'''simple docstring'''
return self._images
@property
def A__ ( self : int ) -> Any:
'''simple docstring'''
return self._labels
@property
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
return self._num_examples
@property
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return self._epochs_completed
def A__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : List[Any]=True ) -> Dict:
'''simple docstring'''
if fake_data:
lowercase : List[str] =[1] * 784
lowercase : Dict =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_UpperCAmelCase )],
[fake_label for _ in range(_UpperCAmelCase )],
)
lowercase : Union[str, Any] =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase : List[str] =numpy.arange(self._num_examples )
numpy.random.shuffle(_UpperCAmelCase )
lowercase : Tuple =self.images[perma]
lowercase : Optional[Any] =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase : Optional[Any] =self._num_examples - start
lowercase : Union[str, Any] =self._images[start : self._num_examples]
lowercase : Tuple =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase : List[str] =numpy.arange(self._num_examples )
numpy.random.shuffle(_UpperCAmelCase )
lowercase : Union[str, Any] =self.images[perm]
lowercase : Union[str, Any] =self.labels[perm]
# Start next epoch
lowercase : List[str] =0
lowercase : str =batch_size - rest_num_examples
lowercase : Any =self._index_in_epoch
lowercase : Optional[Any] =self._images[start:end]
lowercase : int =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase : List[str] =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_snake_case , '''Please write your own downloading logic.''' )
def lowercase_ ( __A : Optional[Any] , __A : Dict , __A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not gfile.Exists(_snake_case ):
gfile.MakeDirs(_snake_case )
lowercase : Any =os.path.join(_snake_case , _snake_case )
if not gfile.Exists(_snake_case ):
urllib.request.urlretrieve(_snake_case , _snake_case ) # noqa: S310
with gfile.GFile(_snake_case ) as f:
lowercase : Any =f.size()
print('''Successfully downloaded''' , _snake_case , _snake_case , '''bytes.''' )
return filepath
@deprecated(
_snake_case , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def lowercase_ ( __A : Dict , __A : List[str]=False , __A : Optional[Any]=False , __A : int=dtypes.floataa , __A : Union[str, Any]=True , __A : int=5_0_0_0 , __A : Any=None , __A : str=DEFAULT_SOURCE_URL , ) -> Union[str, Any]:
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_snake_case , one_hot=_snake_case , dtype=_snake_case , seed=_snake_case )
lowercase : Optional[Any] =fake()
lowercase : Union[str, Any] =fake()
lowercase : Optional[Any] =fake()
return _Datasets(train=_snake_case , validation=_snake_case , test=_snake_case )
if not source_url: # empty string check
lowercase : Dict =DEFAULT_SOURCE_URL
lowercase : List[Any] ='''train-images-idx3-ubyte.gz'''
lowercase : str ='''train-labels-idx1-ubyte.gz'''
lowercase : Any ='''t10k-images-idx3-ubyte.gz'''
lowercase : int ='''t10k-labels-idx1-ubyte.gz'''
lowercase : List[Any] =_maybe_download(
_snake_case , _snake_case , source_url + train_images_file )
with gfile.Open(_snake_case , '''rb''' ) as f:
lowercase : Dict =_extract_images(_snake_case )
lowercase : Any =_maybe_download(
_snake_case , _snake_case , source_url + train_labels_file )
with gfile.Open(_snake_case , '''rb''' ) as f:
lowercase : Tuple =_extract_labels(_snake_case , one_hot=_snake_case )
lowercase : str =_maybe_download(
_snake_case , _snake_case , source_url + test_images_file )
with gfile.Open(_snake_case , '''rb''' ) as f:
lowercase : str =_extract_images(_snake_case )
lowercase : int =_maybe_download(
_snake_case , _snake_case , source_url + test_labels_file )
with gfile.Open(_snake_case , '''rb''' ) as f:
lowercase : List[Any] =_extract_labels(_snake_case , one_hot=_snake_case )
if not 0 <= validation_size <= len(_snake_case ):
lowercase : Optional[Any] =(
'''Validation size should be between 0 and '''
F'{len(_snake_case )}. Received: {validation_size}.'
)
raise ValueError(_snake_case )
lowercase : int =train_images[:validation_size]
lowercase : Tuple =train_labels[:validation_size]
lowercase : List[str] =train_images[validation_size:]
lowercase : Dict =train_labels[validation_size:]
lowercase : Tuple ={'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
lowercase : int =_DataSet(_snake_case , _snake_case , **_snake_case )
lowercase : Dict =_DataSet(_snake_case , _snake_case , **_snake_case )
lowercase : List[Any] =_DataSet(_snake_case , _snake_case , **_snake_case )
return _Datasets(train=_snake_case , validation=_snake_case , test=_snake_case )
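The deprecation notices in this module point to `tensorflow_datasets`; a minimal sketch of that replacement for `read_data_sets`, assuming the `tensorflow-datasets` package is installed:

import tensorflow_datasets as tfds

mnist = tfds.load("mnist", as_supervised=True)
train_ds = mnist["train"].shuffle(10_000).batch(32)
for images, labels in train_ds.take(1):
    print(images.shape, labels.shape)  # (32, 28, 28, 1) (32,)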
| 94
|
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
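Outside the unit tests, this scheduler is usually exercised by swapping it into a diffusers pipeline. A sketch, with the usual Stable Diffusion checkpoint id assumed:

from diffusers import KDPM2DiscreteScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
image.save("astronaut.png")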
| 7
| 0
|
_lowerCamelCase : Optional[int] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
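The re-exports above define the package's public surface; the most common entry point is `load_dataset`:

from datasets import load_dataset

ds = load_dataset("imdb", split="train[:1%]")  # any hub dataset id works here
print(ds)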
| 710
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")


def test_vector() -> None:
    """Placeholder for the doctest suite; the original examples were elided."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
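A quick usage sketch for the `Graph` class above on a small four-node graph:

g = Graph(4)
for u, v, w in ((0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)):
    g.add_edge(u, v, w)
g.boruvka()  # prints the chosen edges and a total MST weight of 6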
| 324
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
def UpperCAmelCase__ ( self : List[Any] ):
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase__ ( snake_case__ ):
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( snake_case__ : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ ( self : Union[str, Any] ):
raise NotImplementedError()
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
@property
def UpperCAmelCase_ ( self :Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ ( self :Dict ) -> str:
UpperCAmelCase__ = ort.SessionOptions()
UpperCAmelCase__ = False
return options
def UpperCAmelCase_ ( self :str ) -> Tuple:
UpperCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase__ = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCAmelCase__ = "A fantasy landscape, trending on artstation"
UpperCAmelCase__ = np.random.RandomState(0 )
UpperCAmelCase__ = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="np" , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase__ = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase_ ( self :Optional[Any] ) -> Any:
UpperCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase__ = init_image.resize((768, 512) )
UpperCAmelCase__ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCAmelCase__ = "A fantasy landscape, trending on artstation"
UpperCAmelCase__ = np.random.RandomState(0 )
UpperCAmelCase__ = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="np" , )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase__ = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _UpperCamelCase :
def __init__( self :Any , lowerCamelCase :List[str] , lowerCamelCase :Optional[int]=13 , lowerCamelCase :Optional[Any]=2 , lowerCamelCase :Any=24 , lowerCamelCase :Union[str, Any]=16 , lowerCamelCase :Any=True , lowerCamelCase :int=True , lowerCamelCase :Optional[Any]=32 , lowerCamelCase :Union[str, Any]=5 , lowerCamelCase :Tuple=4 , lowerCamelCase :Optional[Any]=37 , lowerCamelCase :Optional[Any]="gelu" , lowerCamelCase :int=0.1 , lowerCamelCase :Tuple=0.1 , lowerCamelCase :List[str]=10 , lowerCamelCase :Optional[Any]=0.02 , lowerCamelCase :Optional[int]=None , lowerCamelCase :Optional[Any]=2 , lowerCamelCase :List[Any]=2 , ) -> Union[str, Any]:
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = max_length
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = frequency_stride
UpperCAmelCase__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ = num_patches + 2
def UpperCAmelCase_ ( self :int ) -> List[str]:
UpperCAmelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, input_values, labels
def UpperCAmelCase_ ( self :List[Any] ) -> Any:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :List[str] , lowerCamelCase :List[str] , lowerCamelCase :List[str] ) -> Optional[Any]:
UpperCAmelCase__ = ASTModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :List[Any] ) -> str:
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"input_values": input_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Optional[int] , lowerCamelCase :List[Any] , lowerCamelCase :str , lowerCamelCase :List[Any] , lowerCamelCase :int ) -> str:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase_ ( self :List[str] ) -> int:
UpperCAmelCase__ = ASTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self :Tuple ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def UpperCAmelCase_ ( self :List[str] ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self :Tuple ) -> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["input_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :int ) -> Optional[Any]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = ASTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_audio():
    """Load the sample audio clip used by the integration test below."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self :str ) -> Dict:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase_ ( self :str ) -> Optional[int]:
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowerCamelCase )
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ = prepare_audio()
UpperCAmelCase__ = audio.squeeze().numpy()
UpperCAmelCase__ = feature_extractor(lowerCamelCase , sampling_rate=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**lowerCamelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
UpperCAmelCase__ = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (most recently used)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink ``node`` from the list and return it, or None if not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict and a double linked list."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for ``key`` and mark it as most recently used."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Store ``value`` under ``key``, evicting the oldest entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache, one cache instance per function."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
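# Added usage sketch (not part of the original file): memoizing a unary function
# through the `LRUCache.decorator` classmethod defined above; after a few calls,
# `_demo_fib.cache_info()` reports the hit/miss counts and current size.
@LRUCache.decorator(100)
def _demo_fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return _demo_fib(num - 1) + _demo_fib(num - 2)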
def solution(pence: int = 200) -> int:
    """Count the ways ``pence`` pence can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
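# Added check (not part of the original file): for 5 pence only the 1p, 2p and
# 5p coins apply, giving four combinations (5, 2+2+1, 2+1+1+1 and 1+1+1+1+1).
def _demo_solution() -> None:
    assert solution(5) == 4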
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
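# Illustrative sketch (an addition, not the real `_LazyModule`): the pattern above
# keeps `import transformers` cheap because a module subclass can resolve its
# attributes on first access, importing the heavy submodule only at that point.
# All names below are hypothetical.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Resolve attributes from an import structure on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")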
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower @ upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
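# Added check (not part of the original file): a 2x2 Doolittle factorization
# worked by hand, with a unit diagonal on the lower factor.
def _demo_lower_upper_decomposition() -> None:
    matrix = np.array([[2.0, -2.0], [4.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower, [[1.0, 0.0], [2.0, 1.0]])
    assert np.allclose(upper, [[2.0, -2.0], [0.0, 5.0]])
    assert np.allclose(lower @ upper, matrix)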
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper for multiple `ControlNetModel` instances; the `forward` API mirrors
    `ControlNetModel` and sums the residuals of all nets.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least "
                f"{pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
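# Added usage sketch (not part of the original file; the two hub checkpoints are
# illustrative): combining two ControlNets so their residuals are summed inside
# `forward` during denoising.
#
#     canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     multi = MultiControlNetModel([canny, pose])
#     multi.save_pretrained("./my-multi")  # writes ./my-multi and ./my-multi_1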
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
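# Added check (not part of the original file): 49/98 is the textbook
# digit-cancelling fraction (49/98 = 4/8), and the four two-digit solutions
# multiply to 1/100, so the answer to the problem is 100.
def _demo_digit_cancelling() -> None:
    assert is_digit_cancelling(49, 98)
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100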
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 5 -> 25)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
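# Added check (not part of the original file): 5**2 = 25 and 76**2 = 5776 both
# end in the original number, while 7**2 = 49 does not.
def _demo_is_automorphic_number() -> None:
    assert is_automorphic_number(5)
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)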
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
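# Added usage sketch (not part of the original file), worked by hand: with the
# keyword "Goodbye!!" the map sends HELLO WORLD to CYJJM VMQJB, and deciphering
# restores the (uppercased) plaintext.
def _demo_keyword_cipher() -> None:
    cipher_map = create_cipher_map("Goodbye!!")
    encoded = encipher("Hello World!!", cipher_map)
    assert encoded == "CYJJM VMQJB!!"
    assert decipher(encoded, cipher_map) == "HELLO WORLD!!"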
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
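# Added check (not part of the original file): by the mass action law
# n * p = n_i**2, so any two known concentrations determine the third.
def _demo_carrier_concentration() -> None:
    assert carrier_concentration(25.0, 100.0, 0.0) == ("intrinsic_conc", 50.0)
    assert carrier_concentration(0.0, 1600.0, 200.0) == ("electron_conc", 25.0)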
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` left by ``shift_amount`` bits, as a binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits, as a binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Sign-preserving right shift, using a two's complement representation."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
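# Added checks (not part of the original file), worked out by hand:
# 0b1 shifted left once is 0b10; 0b1000 shifted right twice is 0b10; and -4
# (0b1100 in 4-bit two's complement) arithmetically shifted right once is 0b1110.
def _demo_shifts() -> None:
    assert logical_left_shift(1, 1) == "0b10"
    assert logical_right_shift(8, 2) == "0b10"
    assert arithmetic_right_shift(-4, 1) == "0b1110"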
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class _lowercase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """simple docstring"""
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
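# A minimal usage sketch (class name kept from the obfuscated source; values illustrative):
# processor = _lowercase(size={"shortest_edge": 224})
# batch = processor.preprocess(list_of_pil_frames, return_tensors="pt")
# batch["pixel_values"]  # shape: (num_videos, num_frames, channels, height, width)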
| 563
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
"""simple docstring"""
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 63
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_karras_ve_pipeline(self):
        """simple docstring"""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 146
| 0
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    '''Get the class label from a filename such as "beagle_32.jpg" -> "beagle".'''
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    """simple docstring"""

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        """simple docstring"""
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        """simple docstring"""
        return len(self.file_names)

    def __getitem__(self, idx):
        """simple docstring"""
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    '''simple docstring'''
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.")
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.")
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs and relevant project information")
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
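# A minimal invocation sketch (the script filename and paths are illustrative,
# not from the source):
# python cv_example.py --data_dir ./images --checkpointing_steps epoch --output_dir ./ckpts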
| 157
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    '''Difference between the hypothesis value and the actual output for one example.'''
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    '''Compute theta_0 + theta_1*x1 + theta_2*x2 + theta_3*x3 for one input tuple.'''
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    '''Actual output value for the example from the chosen data set.'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    '''Hypothesis value for the example from the chosen data set.'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    '''Sum of the cost-derivative terms; index -1 corresponds to the bias term.'''
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    '''simple docstring'''
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    '''simple docstring'''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    '''simple docstring'''
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
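# The model fitted above is the linear hypothesis
#   h(x) = theta_0 + theta_1*x1 + theta_2*x2 + theta_3*x3
# and each update step is theta_i <- theta_i - LEARNING_RATE * dJ/dtheta_i,
# i.e. batch gradient descent over all m training examples per iteration.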
| 157
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 474
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
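# A minimal usage sketch (illustrative): k defaults to the 5 nearest neighbours,
# and ties in `sorted(distances)` are broken by the stored target value.
# classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3], k=3)  # -> one of `classes`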
| 373
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _a(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
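# A minimal instantiation sketch (illustrative values, not from the source):
# config = _a(layer_type="bottleneck", depths=[2, 2, 2, 2], out_features=["stage4"])
# config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']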
| 717
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Dict = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _a(PretrainedConfig):
    """simple docstring"""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
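# A minimal instantiation sketch (illustrative):
# config = _a(prediction_length=24)   # context_length defaults to prediction_length
# config.lags_sequence                # -> [1, 2, 3, 4, 5, 6, 7]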
| 95
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True)
            }
            records.append(record)
        return records
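# A minimal usage sketch (the checkpoint name is illustrative, not from this file):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("cat.jpg")  # -> [{"generated_text": "..."}]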
| 287
|
from sklearn.metrics import mean_squared_error
import datasets
A : List[Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
A : str = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
A : Any = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 287
| 1
|
'''simple docstring'''
from __future__ import annotations
from random import random
class __SCREAMING_SNAKE_CASE :
    def __init__(self, value=None) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
def __repr__( self ) ->str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self) -> str:
        '''simple docstring'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """simple docstring"""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """simple docstring"""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
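# A short note (not from the source): because priorities are drawn uniformly at
# random in Node.__init__, the treap is balanced in expectation, so split/merge
# and therefore insert/erase run in O(log n) expected time.
# Example interactive input: "+1 +3 +5 +17 -3" builds the set {1, 5, 17}.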
| 715
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 270
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
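# Note (added for clarity): `_LazyModule` replaces this module in `sys.modules`,
# so importing the package stays cheap and torch-backed symbols such as
# `VanModel` are only actually imported on first attribute access.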
| 623
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int =logging.get_logger(__name__)
__magic_name__ : List[Any] ={}
class LlamaConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__( self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
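# --- Illustrative usage (added by the editor; not part of the original file) ---
# A quick exercise of the `_rope_scaling_validation` check above: a well-formed
# dict is accepted, an unknown scaling type raises.
if __name__ == "__main__":
    LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    try:
        LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(f"rejected invalid rope_scaling: {err}")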
| 664
| 0
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    """simple docstring"""
    return AutoConfig.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    """simple docstring"""
    return AutoTokenizer.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    """simple docstring"""
    return AutoModel.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    """simple docstring"""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
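# --- Illustrative usage (added by the editor; not part of the original file) ---
# The wrappers above follow the torch.hub entry-point convention, so a single
# checkpoint name (here 'bert-base-cased', downloaded on first use) is enough:
if __name__ == "__main__":
    hub_tokenizer = tokenizer("bert-base-cased")
    hub_model = model("bert-base-cased")
    outputs = hub_model(**hub_tokenizer("Hello world", return_tensors="pt"))
    print(outputs.last_hidden_state.shape)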
| 717
|
'''simple docstring'''
def xnor_gate( input_a : int , input_b : int ) -> int:
    """simple docstring"""
    return 1 if input_a == input_b else 0


def test_xnor_gate( ) -> None:
    """simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
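# --- Illustrative alternative (added by the editor; not part of the original file) ---
# XNOR is the negation of XOR, so the gate can also be written with Python's
# bitwise operators; a tiny cross-check against the version above:
def xnor_gate_bitwise(input_a: int , input_b: int ) -> int:
    return int(not (input_a ^ input_b) )


assert all(xnor_gate(a , b ) == xnor_gate_bitwise(a , b ) for a in (0, 1) for b in (0, 1) )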
| 68
| 0
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests ( unittest.TestCase ):
    def get_file_format(self , seed , shape ):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self , seed=0 , shape=(4, 4, 6_4, 6_4) , fpaa=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image

    def get_unet_model(self , fpaa=False , model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = 'bf16' if fpaa else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder='unet' , dtype=dtype , revision=revision )
        return model, params

    def get_encoder_hidden_states(self , seed=0 , shape=(4, 7_7, 7_6_8) , fpaa=False ):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
            [1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
            [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
            [3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )

        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
            [1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
            [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
            [3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )

        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1E-2 )
| 230
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    """simple docstring"""
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    """simple docstring"""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )

    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    """simple docstring"""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )

    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    """simple docstring"""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )

    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )

    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]

    set_model_weights_in_torch(model_weights , model , config.hidden_size )

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
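# --- Illustrative sketch (added by the editor; not part of the original script) ---
# Trax stores dense kernels as (in_features, out_features) while torch.nn.Linear
# stores (out_features, in_features); the transpose convention handled by
# `set_param` above can be demonstrated in isolation (call manually; it is not
# executed on import):
def _demo_transpose_convention():
    trax_kernel = np.arange(6 , dtype=np.float32 ).reshape(2 , 3 )  # (in=2, out=3)
    layer = nn.Linear(2 , 3 , bias=False )                          # weight shape: (3, 2)
    set_param(layer , torch.tensor(trax_kernel ).transpose(0 , 1 ).contiguous() )
    assert layer.weight.shape == (3, 2)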
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class XLNetConfig ( PretrainedConfig ):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
                """ instead.""" , FutureWarning , )
            use_mems_eval = kwargs["""use_cache"""]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 225
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[] , metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        } , )

    batch_sizes: List[int] = list_field(
        default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )

    inference: bool = field(
        default=True , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
    cuda: bool = field(
        default=True , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
    tpu: bool = field(
        default=True , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
    fp16: bool = field(default=False , metadata={'help': 'Use FP16 to accelerate inference.'} )
    training: bool = field(default=False , metadata={'help': 'Benchmark training of model'} )
    verbose: bool = field(default=False , metadata={'help': 'Verbose memory tracing'} )
    speed: bool = field(
        default=True , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
    memory: bool = field(
        default=True , metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={'help': 'Trace memory line by line'} )
    save_to_csv: bool = field(default=False , metadata={'help': 'Save result to a CSV file'} )
    log_print: bool = field(default=False , metadata={'help': 'Save all print statements in a log file'} )
    env_print: bool = field(default=False , metadata={'help': 'Whether to print environment information'} )
    multi_process: bool = field(
        default=True , metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        } , )
    inference_time_csv_file: str = field(
        default=F'''inference_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
    inference_memory_csv_file: str = field(
        default=F'''inference_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
    train_time_csv_file: str = field(
        default=F'''train_time_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
    train_memory_csv_file: str = field(
        default=F'''train_memory_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
    env_info_csv_file: str = field(
        default=F'''env_info_{round(time() )}.csv''' , metadata={'help': 'CSV filename used if saving environment information.'} , )
    log_filename: str = field(
        default=F'''log_{round(time() )}.csv''' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
    repeat: int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        warnings.warn(
            F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , FutureWarning , )

    def to_json_string( self ):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def model_names( self ) -> List[str]:
        '''simple docstring'''
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models

    @property
    def do_multi_processing( self ):
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
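# --- Illustrative usage (added by the editor; not part of the original file) ---
# The dataclass is designed to be filled by transformers' HfArgumentParser,
# which derives CLI flags from the field names (a sketch; flag handling for
# list fields is an assumption about the parser's nargs behaviour):
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(BenchmarkArguments )
    (benchmark_args,) = parser.parse_args_into_dataclasses(
        args=["--models", "bert-base-cased", "--batch_sizes", "1"] )
    print(benchmark_args.to_json_string() )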
| 225
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : Dict = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    '''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )

        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
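# --- Illustrative usage (added by the editor; not part of the original file) ---
# Building a tiny batch with the helper above; with pad_token_id=1, positions
# holding the pad token are masked out of `attention_mask`:
if __name__ == "__main__":
    demo_config = PegasusConfig(pad_token_id=1 , vocab_size=99 )
    demo_input_ids = np.array([[5, 6, 2], [7, 1, 1]] )
    demo_decoder_input_ids = np.array([[1, 5, 6], [1, 7, 1]] )
    demo_batch = prepare_pegasus_inputs_dict(demo_config , demo_input_ids , demo_decoder_input_ids )
    assert demo_batch["attention_mask"].tolist() == [[1, 1, 1], [1, 0, 0]]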
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @slow
    def test_pegasus_xsum_summary( self ):
        '''simple docstring'''
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 655
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles( path , articles ):
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )


T5_TINY = """patrickvonplaten/t5-tiny-random"""
BART_TINY = """sshleifer/bart-tiny-random"""
MBART_TINY = """sshleifer/tiny-mbart"""

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest ( TestCasePlus ):
'''simple docstring'''
    def run_eval_tester( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name , articles )

        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys , "argv" , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self ):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow( self , model ):
        '''simple docstring'''
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / "scores.json" )
        reference_path = str(tmp_dir / "val.target" )
        _dump_articles(input_file_name , text["en"] )
        _dump_articles(reference_path , text["de"] )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )

        with patch.object(sys , "argv" , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu" )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name ).exists()
            os.remove(Path(output_file_name ) )
| 655
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
    'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !
        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 476
| 0
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental( fn : Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args : str , **kwargs : Optional[int]):
        warnings.warn(
            (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return fn(*args , **kwargs)

    return _inner_fn
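# --- Illustrative usage (added by the editor; not part of the original file) ---
# Decorating a function with `experimental` makes every call emit a warning:
@experimental
def new_feature(x: int) -> int:
    return x * 2


if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert new_feature(2) == 4
    assert "'new_feature' is experimental" in str(caught[0].message)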
| 606
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key( k ) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name)
    return k
def convert_pegasus( tf_weights : dict , cfg_updates : dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1])
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('''bias''') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy( path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path , name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''')
    sd.pop('''model.encoder.embed_positions.weight''')
    torch.save(sd , Path(save_dir) / '''pytorch_model.bin''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
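# --- Illustrative invocation (added by the editor; not part of the original script) ---
# Assuming a TF checkpoint downloaded for the `aeslc` task, the script is run as:
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
# (the script filename is an assumption; any name works since the entry point is __main__)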
| 606
| 1
|
edges: dict = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices: list = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start , visited , sort ) -> list:
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort (each vertex appears after everything reachable from it)
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
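# --- Illustrative alternative (added by the editor; not part of the original file) ---
# The DFS above emits each vertex only after everything reachable from it, i.e.
# dependencies-first order. Kahn's algorithm produces the forward topological
# order iteratively and also detects cycles (the DFS version assumes a DAG):
def kahn_topological_sort(graph: dict ) -> list:
    in_degree = {vertex: 0 for vertex in graph}
    for targets in graph.values():
        for target in targets:
            in_degree[target] += 1
    queue = [vertex for vertex, degree in in_degree.items() if degree == 0]
    order = []
    while queue:
        vertex = queue.pop()
        order.append(vertex )
        for target in graph[vertex]:
            in_degree[target] -= 1
            if in_degree[target] == 0:
                queue.append(target )
    if len(order ) != len(graph ):
        raise ValueError("graph contains a cycle" )
    return order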
| 57
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase = pytest.mark.integration
@require_faiss
class IndexableDatasetTest ( TestCase ):
    """simple docstring"""
    def _create_dummy_dataset( self ) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset

    def test_add_faiss_index( self ):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )

        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )

        scores, examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        """simple docstring"""
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
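# Illustrative sketch, not part of the test suite (assumption: faiss and datasets installed);
# it exercises the same FaissIndex API the tests above rely on:
#
#   import faiss
#   import numpy as np
#   from datasets.search import FaissIndex
#
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))               # five one-hot "documents"
#   scores, ids = index.search(np.eye(5, dtype=np.float32)[2])
#   assert ids[0] == 2                                           # e_2 is its own nearest neighbour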
| 174
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
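# Standalone usage sketch (assumption: diffusers installed); it mirrors the configuration the
# tests above build through get_scheduler_config():
#
#   from diffusers import UnCLIPScheduler
#
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
#   scheduler.set_timesteps(25)     # the shortened schedule used in test_full_loop_skip_timesteps
#   print(scheduler.timesteps)      # 25 descending timesteps drawn from the 1000 training steps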
| 714
|
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])

                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a_c),
            FadeOut(a, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
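# Rendering sketch (assumptions: Manim Community Edition installed, and this scene saved as
# stage_5.py; the file name is illustrative):
#
#   manim -pql stage_5.py Stage5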
| 385
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
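# For reference, a minimal recursive 0/1 knapsack consistent with the calls above
# (assumption about the tested module: knapsack.knapsack(capacity, weights, values, counter)):
#
#   def knapsack(capacity, weights, values, counter):
#       if counter == 0 or capacity == 0:
#           return 0
#       if weights[counter - 1] > capacity:            # item does not fit: skip it
#           return knapsack(capacity, weights, values, counter - 1)
#       return max(                                    # best of skipping vs. taking the item
#           knapsack(capacity, weights, values, counter - 1),
#           values[counter - 1]
#           + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#       )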
| 153
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A__ : Tuple = TypeVar('''KEY''')
A__ : List[Any] = TypeVar('''VAL''')
@dataclass(frozen=UpperCamelCase_ ,slots=UpperCamelCase_ )
class __snake_case ( Generic[KEY, VAL] ):
_a = 42
_a = 42
class __snake_case ( _Item ):
def __init__( self : Union[str, Any]):
super().__init__(A_ , A_)
def __bool__( self : int):
return False
A__ : Optional[int] = _DeletedItem()
class __snake_case ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , A_ : int = 8 , A_ : float = 0.75):
lowerCAmelCase_ : Optional[Any] = initial_block_size
lowerCAmelCase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase_ : str = capacity_factor
lowerCAmelCase_ : Union[str, Any] = 0
def UpperCAmelCase__ ( self : str , A_ : KEY):
return hash(A_) % len(self._buckets)
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int):
return (ind + 1) % len(self._buckets)
def UpperCAmelCase__ ( self : Tuple , A_ : int , A_ : KEY , A_ : VAL):
lowerCAmelCase_ : str = self._buckets[ind]
if not stored:
lowerCAmelCase_ : Optional[int] = _Item(A_ , A_)
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase_ : Union[str, Any] = _Item(A_ , A_)
return True
else:
return False
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : List[Any] = len(self._buckets) * self._capacity_factor
return len(self) >= int(A_)
def UpperCAmelCase__ ( self : List[Any]):
if len(self._buckets) <= self._initial_block_size:
return False
lowerCAmelCase_ : Any = len(self._buckets) * self._capacity_factor / 2
return len(self) < limit
def UpperCAmelCase__ ( self : Optional[Any] , A_ : int):
lowerCAmelCase_ : List[str] = self._buckets
lowerCAmelCase_ : str = [None] * new_size
lowerCAmelCase_ : Optional[Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val)
def UpperCAmelCase__ ( self : int):
self._resize(len(self._buckets) * 2)
def UpperCAmelCase__ ( self : Dict):
self._resize(len(self._buckets) // 2)
def UpperCAmelCase__ ( self : List[str] , A_ : KEY):
lowerCAmelCase_ : str = self._get_bucket_index(A_)
for _ in range(len(self._buckets)):
yield ind
lowerCAmelCase_ : Tuple = self._get_next_ind(A_)
def UpperCAmelCase__ ( self : List[Any] , A_ : KEY , A_ : VAL):
for ind in self._iterate_buckets(A_):
if self._try_set(A_ , A_ , A_):
break
def __setitem__( self : int , A_ : KEY , A_ : VAL):
if self._is_full():
self._size_up()
self._add_item(A_ , A_)
def __delitem__( self : Tuple , A_ : KEY):
for ind in self._iterate_buckets(A_):
lowerCAmelCase_ : Tuple = self._buckets[ind]
if item is None:
raise KeyError(A_)
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase_ : Dict = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : List[str] , A_ : KEY):
for ind in self._iterate_buckets(A_):
lowerCAmelCase_ : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A_)
def __len__( self : Dict):
return self._len
def __iter__( self : Dict):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any]):
lowerCAmelCase_ : List[Any] = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item)
return F"""HashMap({val_string})"""
| 171
| 0
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__a = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__a = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__a = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 627
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
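# Behavioural note with a usage sketch: because the module object is swapped for a _LazyModule,
#
#   from transformers.models.herbert import HerbertTokenizer
#
# only triggers the import of tokenization_herbert at attribute-access time, and
# HerbertTokenizerFast is exposed solely when the optional `tokenizers` backend is installed.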
| 627
| 1
|
"""simple docstring"""
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(SCREAMING_SNAKE_CASE__ ) * abs(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
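# Note: with SI inputs (mass in kg, velocity in m/s) the result is in joules, and
# abs(velocity) * abs(velocity) equals velocity ** 2 for any real velocity, so the sign
# of the velocity can never make the energy negative.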
| 260
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
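# Sanity check of the convolutional-embedding arithmetic used in create_and_check_model,
# worked through for the default tester configuration (image_size=64):
#   stage 0: floor((64 + 2*2 - 7) / 4 + 1) = 16
#   stage 1: floor((16 + 2*1 - 3) / 2 + 1) = 8
#   stage 2: floor((8  + 2*1 - 3) / 2 + 1) = 4
# so last_hidden_state has spatial size 4x4 with embed_dim[-1] = 96 channels, and the first
# hidden state is 16x16, i.e. image_size // 4, exactly what test_hidden_states_output asserts.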
| 477
| 0
|
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
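# Running sketch (assumption: executed from a transformers checkout with the usual test
# layout): slow integration tests such as test_inference_no_head are skipped unless the
# RUN_SLOW flag is set, e.g.
#
#   RUN_SLOW=1 pytest tests/models/deberta_v2/test_modeling_deberta_v2.py -k inference_no_head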
| 721
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =[e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE__ =2
@register_to_config
def __init__( self, _a = 10_00, _a = 0.0_0085, _a = 0.012, _a = "linear", _a = None, _a = "epsilon", _a = False, _a = False, _a = 1.0, _a = "linspace", _a = 0, ) -> str:
if trained_betas is not None:
__SCREAMING_SNAKE_CASE = torch.tensor(_a, dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE = torch.linspace(_a, _a, _a, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5, beta_end**0.5, _a, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(_a, alpha_transform_type="cosine" )
elif beta_schedule == "exp":
__SCREAMING_SNAKE_CASE = betas_for_alpha_bar(_a, alpha_transform_type="exp" )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE = 1.0 - self.betas
__SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(_a, _a, _a )
__SCREAMING_SNAKE_CASE = use_karras_sigmas
def __lowerCAmelCase ( self, _a, _a=None ) -> Any:
if schedule_timesteps is None:
__SCREAMING_SNAKE_CASE = self.timesteps
__SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__SCREAMING_SNAKE_CASE = 1 if len(_a ) > 1 else 0
else:
__SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self, _a, _a, ) -> torch.FloatTensor:
__SCREAMING_SNAKE_CASE = self.index_for_timestep(_a )
__SCREAMING_SNAKE_CASE = self.sigmas[step_index]
__SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # Heun visits each sigma twice (predictor and corrector step), except the endpoints
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        # no stored derivative yet == the Euler (predictor) half of a Heun step
        return self.dt is None
    def step(
        self,
        model_output,
        timestep,
        sample,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
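
# Minimal usage sketch (illustrative; `unet` stands for any callable returning a noise
# prediction with a `.sample` attribute and is not defined in this file):
#
#     scheduler = HeunDiscreteScheduler(use_karras_sigmas=True)
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     sample = torch.randn(1, 4, 64, 64, device="cuda") * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample
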
| 214
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Given any two of conductivity, electron concentration and electron mobility
    (pass 0 for the unknown one), compute the third via sigma = n * e * mu.
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
    elif mobility < 0:
        raise ValueError("Mobility cannot be negative")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
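
# Worked example (illustrative, not in the original file): with the unknown set to 0,
# the function solves sigma = n * e * mu for the missing quantity.
#
#     electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.135)
#     # -> ('conductivity', 2.1628349...)   i.e. 1e20 * 1.6021e-19 * 0.135
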
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
|
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # one random sample, one denoising step; used as a minimal smoke-test pipeline
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # deliberately return all-ones so a test can assert on a deterministic output
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
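
# Hypothetical usage (the `unet` and `scheduler` components are assumed to come from a
# pretrained DiffusionPipeline; they are not defined in this file):
#
#     pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#     output = pipeline()   # tensor of ones with the UNet's sample shape
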
| 307
| 1
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name: str, config_path: str, checkpoint_path: str, model_dump_path: str):
    """
    Copy/paste/tweak an s3prl downstream checkpoint into a transformers UniSpeechSat model.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
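
# Hypothetical invocation (script name and paths are placeholders):
#
#     python convert_unispeech_sat_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted_model
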
| 706
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation and join them.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
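
# Round-trip sketch (illustrative, not part of the original module):
#
#     >>> base16_encode(b"Hello")
#     '48656C6C6F'
#     >>> base16_decode("48656C6C6F")
#     b'Hello'
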
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    # We will verify our results on an image of two cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """
    Copy/paste/tweak the original MaskFormer checkpoint weights into our MaskFormer structure.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
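
# Example invocation (script name and checkpoint path are placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade
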
| 106
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 34
| 0
|
"""simple docstring"""
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
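
# Quick usage sketch (illustrative, not part of the original file); the grid below has
# five 8-connected islands:
#
#     grid = [
#         [1, 1, 0, 0, 0],
#         [0, 1, 0, 0, 1],
#         [1, 0, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#     ]
#     Matrix(5, 5, grid).count_islands()  # -> 5
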
| 302
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 302
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
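
# Usage sketch (illustrative): with huggingface_hub >= 0.11 and the default revision,
#
#     hf_hub_url("squad", "README.md")
#     # -> "https://huggingface.co/datasets/squad/resolve/main/README.md"
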
| 56
|
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
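
# Worked trace (illustrative): the three candidates are merged smallest-first, so the
# sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
#
#     ugly_numbers(10)  # -> 12
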
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_00) = }""")
| 348
| 0
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
lowerCamelCase__ : Dict = evaluate.load('''xnli''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(_lowercase ):
lowerCamelCase__ : List[Any] = p.predictions[0] if isinstance(p.predictions , _lowercase ) else p.predictions
lowerCamelCase__ : List[Any] = np.argmax(_lowercase , axis=1 )
return metric.compute(predictions=_lowercase , references=p.label_ids )
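        # For XNLI, `metric.compute` returns a dict of the form {"accuracy": <float>}.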
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase__ : str = default_data_collator
elif training_args.fpaa:
lowerCamelCase__ : Dict = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 )
else:
lowerCamelCase__ : Dict = None
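    # Note: `pad_to_multiple_of=8` aligns padded lengths for fp16 tensor cores; when
    # `data_collator` is None, the Trainer falls back to `DataCollatorWithPadding`
    # since a tokenizer is passed to it below.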
# Initialize our Trainer
lowerCamelCase__ : List[str] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase__ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ : Tuple = last_checkpoint
lowerCamelCase__ : Any = trainer.train(resume_from_checkpoint=_lowercase )
lowerCamelCase__ : Union[str, Any] = train_result.metrics
lowerCamelCase__ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase__ : Optional[Any] = min(_lowercase , len(_lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase__ : Any = trainer.evaluate(eval_dataset=_lowercase )
lowerCamelCase__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase__ : str = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = trainer.predict(_lowercase , metric_key_prefix='''predict''' )
lowerCamelCase__ : Dict = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowercase )
)
lowerCamelCase__ : Any = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''predict''' , _lowercase )
trainer.save_metrics('''predict''' , _lowercase )
lowerCamelCase__ : Optional[Any] = np.argmax(_lowercase , axis=1 )
lowerCamelCase__ : Tuple = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_lowercase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_lowercase ):
lowerCamelCase__ : Optional[Any] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 121
|
"""simple docstring"""
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
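# A quick sketch of the behavior (spaces are kept, repeated letters are dropped):
#     remove_duplicates("Hello World")  # -> "Helo Wrd"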
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : int = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
lowerCamelCase__ : Tuple = remove_duplicates(key.upper() )
lowerCamelCase__ : Optional[Any] = len(_lowercase )
# First fill cipher with key characters
lowerCamelCase__ : int = {alphabet[i]: char for i, char in enumerate(_lowercase )}
    # Then map each remaining alphabet character to the alphabet,
    # starting again from the beginning
for i in range(len(_lowercase ) , 26 ):
lowerCamelCase__ : Optional[int] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowerCamelCase__ : Optional[Any] = alphabet[i - offset]
lowerCamelCase__ : Union[str, Any] = char
return cipher_alphabet
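# A hand-traced sketch for the key "HELLO" (which de-duplicates to "HELO"): the
# resulting map begins
#     {"A": "H", "B": "E", "C": "L", "D": "O", "E": "A", "F": "B", "G": "C", ...}
# i.e. the key fills the first slots, then the rest of the alphabet follows while
# skipping letters already claimed by the key.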
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
return "".join(cipher_map.get(_lowercase , _lowercase ) for ch in message.upper() )
def __a ( _lowercase , _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Tuple = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_lowercase , _lowercase ) for ch in message.upper() )
def __a ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = input('''Enter message to encode or decode: ''' ).strip()
lowerCamelCase__ : List[str] = input('''Enter keyword: ''' ).strip()
lowerCamelCase__ : int = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
lowerCamelCase__ : int = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
lowerCamelCase__ : Optional[Any] = create_cipher_map(_lowercase )
print(func(_lowercase , _lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 121
| 1
|
'''simple docstring'''
def _A ( snake_case__ : int ):
    # Round the real cube root to avoid floating-point error (e.g. 27 ** (1 / 3) is not exactly 3.0).
    snake_case__ : int = round(n ** (1 / 3) )
return (val * val * val) == n
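# With the rounding fix above, the checks below behave as expected:
#     perfect_cube(27)  # -> True  (3 ** 3 == 27)
#     perfect_cube(4)   # -> False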
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 261
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Any = data
snake_case__ : Node | None = None
class snake_case :
"""simple docstring"""
def __init__( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = None
snake_case__ : int = None
def __iter__( self ) -> Iterator[Any]:
"""simple docstring"""
snake_case__ : Dict = self.head
while self.head:
yield node.data
snake_case__ : str = node.next
if node == self.head:
break
def __len__( self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ) -> Optional[int]:
"""simple docstring"""
return "->".join(str(lowerCamelCase ) for item in iter(self ) )
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
self.insert_nth(0 , lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
snake_case__ : Optional[int] = Node(lowerCamelCase )
if self.head is None:
            snake_case__ : Tuple = new_node # first node points to itself
snake_case__ : Union[str, Any] = new_node
elif index == 0: # insert at head
snake_case__ : Any = self.head
snake_case__ : Any = new_node
else:
snake_case__ : Optional[Any] = self.head
for _ in range(index - 1 ):
snake_case__ : List[Any] = temp.next
snake_case__ : Dict = temp.next
snake_case__ : str = new_node
if index == len(self ) - 1: # insert at tail
snake_case__ : Optional[Any] = new_node
def lowercase__ ( self ) -> int:
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , lowerCamelCase = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
snake_case__ : Union[str, Any] = self.head
if self.head == self.tail: # just one node
snake_case__ : int = None
elif index == 0: # delete head node
snake_case__ : Dict = self.tail.next.next
snake_case__ : int = self.head.next
else:
snake_case__ : Dict = self.head
for _ in range(index - 1 ):
snake_case__ : Any = temp.next
snake_case__ : Dict = temp.next
snake_case__ : str = temp.next.next
if index == len(self ) - 1: # delete at tail
snake_case__ : List[Any] = temp
return delete_node.data
def lowercase__ ( self ) -> bool:
"""simple docstring"""
return len(self ) == 0
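# A minimal usage sketch (the class above is referenced as CircularLinkedList in
# the test helper below):
#     cll = CircularLinkedList()
#     cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
#     repr(cll)  # -> "0->1->2"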
def _A ( ):
snake_case__ : int = CircularLinkedList()
assert len(snake_case__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(snake_case__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(snake_case__ ) == i
circular_linked_list.insert_nth(snake_case__ , i + 1 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261
| 1
|
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : List[str] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowerCAmelCase_( lowercase_ : list[list[int]] , lowercase_ : list[int] , lowercase_ : list[int] , lowercase_ : int , lowercase_ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
_lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowercase_ ) )
] # the reference grid
_lowerCamelCase = 1
_lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowercase_ ) )
] # the action grid
_lowerCamelCase = init[0]
_lowerCamelCase = init[1]
_lowerCamelCase = 0
_lowerCamelCase = g + heuristic[x][y] # cost from starting cell to destination cell
_lowerCamelCase = [[f, g, x, y]]
_lowerCamelCase = False # flag that is set when search is complete
_lowerCamelCase = False # flag set if we can't find expand
while not found and not resign:
if len(lowercase_ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
_lowerCamelCase = cell.pop()
_lowerCamelCase = next_cell[2]
_lowerCamelCase = next_cell[3]
_lowerCamelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
_lowerCamelCase = True
else:
for i in range(len(lowercase_ ) ): # to try out different valid actions
_lowerCamelCase = x + DIRECTIONS[i][0]
_lowerCamelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowercase_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_lowerCamelCase = g + cost
_lowerCamelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_lowerCamelCase = 1
_lowerCamelCase = i
_lowerCamelCase = []
_lowerCamelCase = goal[0]
_lowerCamelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_lowerCamelCase = x - DIRECTIONS[action[x][y]][0]
_lowerCamelCase = y - DIRECTIONS[action[x][y]][1]
_lowerCamelCase = xa
_lowerCamelCase = ya
invpath.append([x, y] )
_lowerCamelCase = []
for i in range(len(lowercase_ ) ):
path.append(invpath[len(lowercase_ ) - 1 - i] )
return path, action
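# Note on the return values: `action[x][y]` stores the index into DIRECTIONS of the
# move used to reach cell (x, y); the path is therefore reconstructed by walking
# backwards from the goal and subtracting DIRECTIONS[action[x][y]]. Combined with
# the Manhattan-distance heuristic built below, this is an A* search.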
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__SCREAMING_SNAKE_CASE : str = [0, 0]
    # all coordinates are given as [row, column]
__SCREAMING_SNAKE_CASE : Dict = [len(grid) - 1, len(grid[0]) - 1]
__SCREAMING_SNAKE_CASE : int = 1
# the cost map which pushes the path closer to the goal
__SCREAMING_SNAKE_CASE : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__SCREAMING_SNAKE_CASE : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__SCREAMING_SNAKE_CASE : List[str] = 9_9
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 700
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
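# Each pass replaces every segment with four shorter ones, so starting from the
# initial 4 points there are 3 * 4**n + 1 points after n iterations.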
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
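# A quick sanity check of the rotation (the angle is in degrees):
#     rotate(numpy.array([1, 0]), 90)  # -> approximately [0., 1.]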
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623
| 0
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , *_A , **_A ):
'''simple docstring'''
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 148
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''vit_mae'''
def __init__( self , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-12 , lowerCamelCase__=224 , lowerCamelCase__=16 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=16 , lowerCamelCase__=512 , lowerCamelCase__=8 , lowerCamelCase__=2_048 , lowerCamelCase__=0.75 , lowerCamelCase__=False , **lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = decoder_num_attention_heads
__lowerCamelCase = decoder_hidden_size
__lowerCamelCase = decoder_num_hidden_layers
__lowerCamelCase = decoder_intermediate_size
__lowerCamelCase = mask_ratio
__lowerCamelCase = norm_pix_loss
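        # `mask_ratio=0.75` masks 75% of the image patches during pre-training,
        # the default from the original MAE paper.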
| 469
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
UpperCamelCase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase_ = get_tests_dir("fixtures/dummy-config.json")
class a_ ( unittest.TestCase ):
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = 0
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''')
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :List[Any]) -> Optional[int]:
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase).to_dict()
config_dict.pop('''feature_extractor_type''')
UpperCAmelCase_ = WavaVecaFeatureExtractor(**_lowercase)
# save in new folder
model_config.save_pretrained(_lowercase)
config.save_pretrained(_lowercase)
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase)
# make sure private variable is not incorrectly saved
UpperCAmelCase_ = json.loads(config.to_json_string())
self.assertTrue('''_processor_class''' not in dict_as_saved)
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :List[str]) -> Optional[Any]:
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
def __a ( self :Optional[int]) -> str:
with self.assertRaisesRegex(
_lowercase , '''bert-base is not a local folder and is not a valid model identifier'''):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained('''bert-base''')
def __a ( self :Any) -> Union[str, Any]:
with self.assertRaisesRegex(
_lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase , revision='''aaaaaa''')
def __a ( self :Union[str, Any]) -> Dict:
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''')
def __a ( self :Optional[Any]) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_lowercase)
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase)
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase , trust_remote_code=_lowercase)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
def __a ( self :List[Any]) -> Optional[int]:
try:
AutoConfig.register('''custom''' , _lowercase)
AutoFeatureExtractor.register(_lowercase , _lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase):
AutoFeatureExtractor.register(_lowercase , _lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(_lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase)
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(_lowercase)
self.assertIsInstance(_lowercase , _lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Tuple) -> int:
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =True
try:
AutoConfig.register('''custom''' , _lowercase)
AutoFeatureExtractor.register(_lowercase , _lowercase)
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''')
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_lowercase)
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
self.assertTrue(not hasattr(_lowercase , '''is_local'''))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 561
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : List[Any] =SpeechTaTokenizer
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : List[Any] =True
def __a ( self :Union[str, Any]) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = SpeechTaTokenizer(_lowercase)
UpperCAmelCase_ = AddedToken('''<mask>''' , lstrip=_lowercase , rstrip=_lowercase)
UpperCAmelCase_ = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token})
tokenizer.add_tokens(['''<ctc_blank>'''])
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Tuple , _lowercase :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = '''this is a test'''
UpperCAmelCase_ = '''this is a test'''
return input_text, output_text
def __a ( self :List[Any] , _lowercase :str , _lowercase :List[str]=False , _lowercase :Union[str, Any]=20 , _lowercase :Any=5) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_input_output_texts(_lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase)
return text, ids
def __a ( self :Dict) -> str:
UpperCAmelCase_ = '''<pad>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :int) -> Dict:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-4] , '''œ''')
self.assertEqual(vocab_keys[-2] , '''<mask>''')
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''')
self.assertEqual(len(_lowercase) , 81)
def __a ( self :Dict) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=_lowercase)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCAmelCase_ = tokenizer.add_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , len(_lowercase))
self.assertEqual(_lowercase , all_size + len(_lowercase))
UpperCAmelCase_ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowercase)
self.assertGreaterEqual(len(_lowercase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
UpperCAmelCase_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCAmelCase_ = tokenizer.add_special_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , len(_lowercase))
self.assertEqual(_lowercase , all_size_a + len(_lowercase))
UpperCAmelCase_ = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowercase)
self.assertGreaterEqual(len(_lowercase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __a ( self :Any) -> List[str]:
pass
def __a ( self :Any) -> Tuple:
pass
def __a ( self :Dict) -> Dict:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
# fmt: off
self.assertListEqual(_lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
# fmt: off
self.assertListEqual(_lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
@slow
def __a ( self :Any) -> List[Any]:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCAmelCase_ = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
UpperCAmelCase_ = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_lowercase , )
| 561
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , snake_case :Any , snake_case :Union[str, Any]=13 , snake_case :List[str]=3 , snake_case :str=224 , snake_case :List[Any]=30 , snake_case :Optional[Any]=400 , snake_case :List[str]=True , snake_case :List[Any]=None , snake_case :str=True , snake_case :List[str]=[0.5, 0.5, 0.5] , snake_case :Tuple=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
A_ : Union[str, Any] = size if size is not None else {"height": 18, "width": 18}
A_ : Union[str, Any] = parent
A_ : str = batch_size
A_ : int = num_channels
A_ : Dict = image_size
A_ : Any = min_resolution
A_ : Dict = max_resolution
A_ : Any = do_resize
A_ : List[str] = size
A_ : List[Any] = do_normalize
A_ : Optional[int] = image_mean
A_ : int = image_std
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = ViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Tuple = EfficientFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , "image_mean" ) )
self.assertTrue(hasattr(__lowercase , "image_std" ) )
self.assertTrue(hasattr(__lowercase , "do_normalize" ) )
self.assertTrue(hasattr(__lowercase , "do_resize" ) )
self.assertTrue(hasattr(__lowercase , "size" ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
A_ : Dict = image_processor(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
A_ : Optional[int] = image_processor(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
A_ : Dict = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
A_ : str = image_processor(__lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 454
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
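# `_LazyModule` defers the heavy submodule imports until an attribute is first
# accessed, keeping the package import cheap even when torch is unavailable.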
| 225
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__snake_case = logging.get_logger(__name__)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCamelCase__ :List[str] = deprecated_arg[3:]
UpperCamelCase__ :Optional[Any] = not kwargs.pop(UpperCamelCase_ )
logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCamelCase__ :Optional[int] = kwargs.pop('''tpu_name''' , self.tpu_name )
UpperCamelCase__ :Union[str, Any] = kwargs.pop('''device_idx''' , self.device_idx )
UpperCamelCase__ :Optional[Any] = kwargs.pop('''eager_mode''' , self.eager_mode )
UpperCamelCase__ :Optional[Any] = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**UpperCamelCase_ )
_a = field(
default=A__ , metadata={'help': 'Name of TPU'} , )
_a = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    _a = field(default=A__ , metadata={'help': 'Benchmark models in eager mode.'} )
_a = field(
default=A__ , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
} , )
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
UpperCamelCase__ :Optional[int] = None
if self.tpu:
try:
if self.tpu_name:
UpperCamelCase__ :Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCamelCase__ :Tuple = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCamelCase__ :str = None
return tpu
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCamelCase__ :Optional[Any] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
            # currently no multi-GPU is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
UpperCamelCase__ :Optional[int] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
UpperCamelCase__ :Dict = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
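        # `OneDeviceStrategy` pins every op to the single chosen device;
        # `TPUStrategy` is only selected when a TPU cluster resolver was found above.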
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.n_gpu > 0
| 705
|
'''simple docstring'''
import numpy as np
def a ( __a , __a , __a , __a , __a ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Tuple = int(np.ceil((x_end - xa) / h ) )
UpperCamelCase__ :Optional[int] = np.zeros((n + 1,) )
UpperCamelCase__ :List[str] = ya
UpperCamelCase__ :Tuple = xa
for k in range(__a ):
UpperCamelCase__ :Dict = f(__a , y[k] )
UpperCamelCase__ :List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
UpperCamelCase__ :Dict = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
UpperCamelCase__ :List[Any] = f(x + h , y[k] + h * ka )
UpperCamelCase__ :List[str] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
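# A minimal usage sketch (argument order inferred from the body: f, y0, x0, h,
# x_end; the solver name is assumed here for illustration):
#     y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     y[-1]  # ~ 2.718..., i.e. e, since y' = y with y(0) = 1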
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
| 0
|
'''simple docstring'''
# fmt: off
SCREAMING_SNAKE_CASE_: List[str] ={
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE_: Optional[Any] ={value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = "Morse code here!"
print(snake_case_ )
UpperCAmelCase_ = encrypt(snake_case_ )
print(snake_case_ )
UpperCAmelCase_ = decrypt(snake_case_ )
print(snake_case_ )
if __name__ == "__main__":
main()
| 78
|
from manim import *
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ : str = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
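# The generate_target()/MoveToTarget idiom used above, shown in isolation
# (a minimal sketch assuming a standard manim Scene; all names are illustrative):
#
# class MoveDemo(Scene):
#     def construct(self):
#         square = Square()
#         square.generate_target()                    # snapshot a copy as square.target
#         square.target.shift(2 * RIGHT).scale(0.5)   # mutate the copy, not the original
#         self.play(MoveToTarget(square))             # animate square into its target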
| 678
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
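# Illustrative invocation (file and script names are hypothetical):
#
#   python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5
#
# xmp.spawn forks one process per TPU core and calls mod._mp_fn(index) in each,
# so the training script must expose an `_mp_fn(index)` entry point.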
| 311
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
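# Quick sanity checks (illustrative):
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello world") == "123 hello world"
assert capitalize("") == ""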
| 311
| 1
|
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Return True if n (below one million) is prime, using the precomputed sieve."""
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in '02468' for digit in str(n))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below the limit (every digit rotation is prime)."""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num) )]
            if all(is_prime(i) for i in list_nums ):
                result.append(num)
    return result
def solution() -> int:
    """Return the number of circular primes below one million."""
    return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
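# Illustrative check: 197 is a circular prime (197, 971 and 719 are all prime),
# so it must appear in the result for any limit >= 197.
assert 197 in find_circular_primes(1000)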
| 284
|
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 2000 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration(self):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
        expected_encoding = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='AI-Sweden/gpt-sw3-126m' , sequences=sequences , )
| 284
| 1
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas(self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3
    def test_full_loop_no_noise(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
    def test_full_loop_device(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith('cpu' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 372
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class from its class name."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config(pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """Load the image processor configuration from a pretrained model checkpoint or local directory."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    def __init__(self ):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('image_processor_type' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , 'image_processor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register(config_class , image_processor_class ):
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
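# Typical usage (illustrative; downloads a checkpoint, so it needs network access):
#
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# inputs = image_processor(images=image, return_tensors="pt")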
| 372
| 1
|
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """
    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    """
    if len(equations) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('''solve_simultaneous() requires lists of integers''')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
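# Sanity check for the demo above: the 5x5 system is (I + J)x = b with J the
# all-ones matrix; summing the equations gives 6*(x1+...+x5) = 30, so each
# x_i = b_i - 5. Expected printed output:
#   [-1.0, 0.0, 1.0, 2.0, 3.0]
#   [0.5]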
| 676
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: find all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
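# Quick correctness check on a small input (illustrative):
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]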
| 410
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = 'audio-spectrogram-transformer'
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
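# Instantiating the default configuration (illustrative):
#
# config = ASTConfig()
# assert config.num_mel_bins == 128 and config.max_length == 1024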
| 709
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("""CPU""", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("""GPU""", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("""Model""", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1),)
        step_1 = MarkupText(
            F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24,)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18,)
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 9
| 0
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    '''Break the text into lines and fully justify each line to max_width characters.'''
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
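# Worked example (the classic width-16 case; verified by hand):
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]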
| 573
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig( PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs )
| 573
| 1
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone( PreTrainedModel , BackboneMixin ):
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        requires_backends(self , "timm" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
        if hasattr(config , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        pretrained = getattr(config , "use_pretrained_backbone" , None )
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , "out_indices" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config" , TimmBackboneConfig() )
        use_timm = kwargs.pop("use_timm_backbone" , True )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        num_channels = kwargs.pop("num_channels" , config.num_channels )
        features_only = kwargs.pop("features_only" , config.features_only )
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("out_indices" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
        """Empty init weights function to ensure compatibility of the class in the library."""
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
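# Illustrative usage (a sketch; assumes `timm` is installed and "resnet18" is
# just an example backbone name):
#
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
# backbone = TimmBackbone(config)
# outputs = backbone(torch.randn(1, 3, 224, 224))
# print([fm.shape for fm in outputs.feature_maps])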
| 119
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences, one per line (used to match published rougeLsum scores)."""
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 119
| 1
|
'''simple docstring'''
import math
def main() -> None:
    message = input('Enter message: ')
    key = int(input(F"""Enter key [2-{len(message) - 1}]: """))
    mode = input('Encryption/Decryption [e/d]: ')
    if mode.lower().startswith('e'):
        text = encrypt_message(key , message)
    elif mode.lower().startswith('d'):
        text = decrypt_message(key , message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + '|'}""")
def encrypt_message(key , message) -> str:
    """
    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [''] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key , message) -> str:
    """
    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
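# Round-trip property (illustrative): decrypting an encryption returns the input.
assert decrypt_message(8, encrypt_message(8, "Attack at dawn")) == "Attack at dawn"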
| 517
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 517
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs(query: str, api_key: str = giphy_api_key ) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = '''+'''.join(query.split() )
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 174
| 0
|