code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    """Builds a tiny Nystromformer config plus random inputs and runs
    shape checks for each task head. `parent` is the unittest.TestCase
    whose assert* methods are used for reporting."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels) with random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small NystromformerConfig mirroring the tester's sizes."""
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: last_hidden_state must be (batch, seq, hidden)."""
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        # exercise all three call signatures accepted by the model
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits must be (batch, seq, vocab)."""
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits must be (batch, seq)."""
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs are tiled per choice; logits (batch, num_choices)."""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite wiring for Nystromformer."""

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        # same shape checks, once per supported position-embedding scheme
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released uw-madison/nystromformer-512 checkpoint."""

    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # reference values recorded from the released checkpoint
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # position 2 is the [MASK] token in the encoded sentence
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 553
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File-name and checkpoint maps referenced by CpmAntTokenizer below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict mapping token -> line index.

    Each line of ``vocab_file`` holds one token; a trailing newline is stripped.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization over a fixed vocab."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split ``token`` into the longest vocab substrings; unknown chars map to unk_token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # over-long inputs are not worth the O(n^2) matching loop
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the candidate window from the right until it is in the vocab
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # no prefix matched: emit unk for this single character and advance by one
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba word segmentation followed by WordPiece.

    Space and newline are represented in the saved vocab by the special
    tokens "</_>" and "</n>" and swapped to the literal characters at load
    time (and back when saving).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # map the literal characters to the ids of their placeholder tokens
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then WordPiece each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode after dropping negative ids and pad/eos/bos tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token, falling back to unk_token."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab back to disk, restoring the "</_>"/"</n>" placeholders."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prefix each sequence with the BOS token."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at the special-token positions produced above."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 103
| 0
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds a tiny Pegasus config/inputs and checks cached vs uncached decoding."""

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict); every input sequence ends in EOS."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with init_cache must match a full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward but with an explicit padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the mask out to the full cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the model input dict, deriving masks from pad_token_id when absent.

    The decoder mask always marks position 0 as attended (the decoder start
    token may equal the pad token).
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCamelCase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = FlaxPegasusModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ )
def __A ( self : List[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__magic_name__ , __magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__magic_name__ , __magic_name__ , __magic_name__ )
def __A ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
@jax.jit
def encode_jitted(__magic_name__ : str , __magic_name__ : Dict=None , **__magic_name__ : Any ):
return model.encode(input_ids=__magic_name__ , attention_mask=__magic_name__ )
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE_ = encode_jitted(**__magic_name__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = encode_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : str ):
return model.decode(
decoder_input_ids=__magic_name__ , decoder_attention_mask=__magic_name__ , encoder_outputs=__magic_name__ , )
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE_ = decode_jitted(**__magic_name__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = decode_jitted(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
for jitted_output, output in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self : Dict ) -> List[Any]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__magic_name__ )
SCREAMING_SNAKE_CASE_ = np.ones((1, 1) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
    @slow
    def __A ( self : Tuple ) -> List[str]:
        """End-to-end batch summarization check: run google/pegasus-xsum over two
        long news articles (PG&E blackout, N-Dubz Mobo nominations) with beam
        search and compare the decoded summaries to reference target texts.

        NOTE(review): every local is assigned to the mangled name
        ``SCREAMING_SNAKE_CASE_`` while the final assertion reads ``tgt_text``
        and ``decoded``, and ``__magic_name__`` stands in for the tokenizer
        inputs / ``truncation=True`` / ``padding=True`` /
        ``skip_special_tokens=True`` — mangling artifact, would raise
        NameError as written; confirm against the upstream Flax Pegasus test.
        """
        SCREAMING_SNAKE_CASE_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        SCREAMING_SNAKE_CASE_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        # Source articles (runtime data — kept byte-for-byte).
        SCREAMING_SNAKE_CASE_ = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]
        # Expected one-sentence XSum-style summaries for the two articles above.
        SCREAMING_SNAKE_CASE_ = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_tensors="np" , truncation=__magic_name__ , max_length=512 , padding=__magic_name__ )
        SCREAMING_SNAKE_CASE_ = model.generate(**__magic_name__ , num_beams=2 ).sequences
        SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
        assert tgt_text == decoded
| 356
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : str = logging.getLogger(__name__)
@dataclass
class lowerCamelCase(TrainingArguments):
    """Seq2seq-specific training arguments.

    Extends ``TrainingArguments`` with label smoothing, sortish sampling,
    generative-metric evaluation, adafactor, model-config dropout overrides,
    and the learning-rate scheduler choice.

    NOTE(review): in the original every field below was bound to one mangled
    unannotated name (so none were real dataclass fields) and the base class /
    defaults referenced an undefined name; names and annotations restored from
    the metadata help strings and the imports (``TrainingArguments``,
    ``arg_to_scheduler``).
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'}
    )
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    # The four dropout overrides below are copied into model.config when set.
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'}
    )
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'}
    )
    lr_scheduler: Optional[str] = field(
        default='linear',
        metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"},
    )
| 356
| 1
|
"""simple docstring"""
UpperCAmelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 88
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE(SchedulerCommonTest):
    """Scheduler tests for IPNDMScheduler.

    NOTE(review): in the original the class inherited from itself, both class
    attributes and all method names collided on single mangled identifiers,
    and every local was assigned to one mangled name while later lines read
    the real names (``config``, ``sample``, ``scheduler`` …). Names restored
    from the internal references (``self.get_scheduler_config``,
    ``self.check_over_configs``, ``self.full_loop`` …) and the imported base
    ``SchedulerCommonTest``.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a scheduler config dict, with overrides applied."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Check save_config/from_pretrained round-trips preserve step output."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check as check_over_configs, for forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run two full denoising passes and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM is run through the timesteps twice (warmup + main pass).
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        """step() must preserve the sample shape across consecutive timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        # Regression value for the deterministic 10-step double pass.
        assert abs(result_mean.item() - 2540529) < 10
| 693
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir("fixtures")
class UpperCAmelCase(unittest.TestCase):
    """Offline-behavior tests for WavaVecaFeatureExtractor.

    NOTE(review): both methods originally shared one mangled name (so only
    the second existed) and all mock attributes were assigned to a single
    mangled local while ``mock.patch`` read an undefined name; restored per
    the upstream transformers feature-extraction utils tests.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        """A cached checkpoint must still load when the Hub returns HTTP 500."""
        # A mock response for an HTTP HEAD request, emulating a server outage.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        """Loading directly from a resolved config URL must still work."""
        _ = WavaVecaFeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''')
@is_staging_test
class UpperCAmelCase(unittest.TestCase):
    """Staging-Hub push_to_hub tests for feature extractors.

    NOTE(review): the lifecycle hooks and test methods originally collided on
    one mangled name (so unittest never ran them), and several arguments were
    the undefined placeholder ``_lowerCamelCase``; restored from internal
    evidence (``cls._token``, the fixtures-dir constant ``UpperCAmelCase_``)
    and the upstream transformers push-to-hub tests. This class also shadows
    the identically named class above it — confirm intended module layout.
    """

    @classmethod
    def setUpClass(cls):
        """Store and persist the staging token for all tests in this class."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of every repo the tests may have created."""
        try:
            delete_repo(token=cls._token, repo_id='''test-feature-extractor''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''valid_org/test-feature-extractor-org''')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='''test-dynamic-feature-extractor''')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase_)
        feature_extractor.push_to_hub('''test-feature-extractor''', use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='''test-feature-extractor''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='''test-feature-extractor''', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase_)
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''', use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='''valid_org/test-feature-extractor''')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id='''valid_org/test-feature-extractor-org''', push_to_hub=True, use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_)

        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''}, )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, '''CustomFeatureExtractor''')
| 712
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCAmelCase__(*objects):
    """Null out every passed reference and empty the accelerator cache.

    Returns the list of ``None`` placeholders so callers can rebind in one
    statement, e.g. ``a, b = UpperCAmelCase__(a, b)``.

    NOTE(review): this is accelerate's ``release_memory``; the module binds
    three different functions to this same mangled name, so only the last
    survives at import time — confirm intended names against upstream.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        # BUG FIX: the original assigned None to a throwaway local, leaving
        # the caller's objects alive; the slot itself must be cleared.
        objects[i] = None
    gc.collect()
    # Empty whichever accelerator cache is present (XPU > NPU > CUDA).
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True when `exception` is a single-argument RuntimeError that
    describes an out-of-memory condition (CUDA OOM, an unsupported-CUDNN
    error, or a CPU allocator failure).

    NOTE(review): renamed from the mangled ``UpperCAmelCase__`` — the
    batch-size decorator below calls ``should_reduce_batch_size`` by this
    name, and the mangled name collided with the two sibling functions.
    """
    _statements = [
        '''CUDA out of memory.''',  # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''',  # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''',  # CPU OOM
    ]
    # Only RuntimeErrors with exactly one message argument are classified.
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def UpperCAmelCase__(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever it
    raises an out-of-memory error (per ``should_reduce_batch_size``).

    The decorated function must take the batch size as its FIRST parameter;
    the decorator supplies it, starting at `starting_batch_size`.

    NOTE(review): the original signature used one mangled name for both
    parameters (a SyntaxError) while the body read ``function`` and
    ``starting_batch_size``; restored accordingly (accelerate's
    ``find_executable_batch_size``).
    """
    # Support bare usage with kwargs: @UpperCAmelCase__(starting_batch_size=64)
    if function is None:
        return functools.partial(UpperCAmelCase__, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator injects the batch size itself.
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''')
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                # Halve and retry only on recognized OOM errors; re-raise the rest.
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 664
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCamelCase__(A: list[list[float]]):
    """Return the inverse of a 2x2 or 3x3 matrix as a list of float rows.

    Intermediate arithmetic is done with ``Decimal`` to reduce float error;
    ``x or 0.0`` normalizes any ``-0.0`` entries to ``0.0``.

    Raises:
        ValueError: if the matrix is singular, or not 2x2 / 3x3.

    NOTE(review): the original assigned every swapped/cofactor entry to a
    throwaway mangled local (leaving the matrices zero) and called ``d(A)``
    on the whole matrix where ``d(n)`` was intended; both fixed here.
    """
    d = Decimal

    # 2x2 case: inverse = (1/det) * [[d, -b], [-c, a]].
    if len(A) == 2 and len(A[0]) == 2 and len(A[1]) == 2:
        determinant = float(d(A[0][0]) * d(A[1][1]) - d(A[1][0]) * d(A[0][1]))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = A[1][1], A[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -A[1][0], -A[0][1]
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]

    # 3x3 case: inverse = (1/det) * adjoint (transposed cofactor matrix).
    elif (
        len(A) == 3
        and len(A[0]) == 3
        and len(A[1]) == 3
        and len(A[2]) == 3
    ):
        # Determinant via the rule of Sarrus.
        determinant = float(
            (
                (d(A[0][0]) * d(A[1][1]) * d(A[2][2]))
                + (d(A[0][1]) * d(A[1][2]) * d(A[2][0]))
                + (d(A[0][2]) * d(A[1][0]) * d(A[2][1]))
            )
            - (
                (d(A[0][2]) * d(A[1][1]) * d(A[2][0]))
                + (d(A[0][1]) * d(A[1][0]) * d(A[2][2]))
                + (d(A[0][0]) * d(A[1][2]) * d(A[2][1]))
            ))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')

        # Cofactor matrix: signed 2x2 minors.
        cofactor_matrix = [[d(0.0)] * 3 for _ in range(3)]
        cofactor_matrix[0][0] = (d(A[1][1]) * d(A[2][2])) - (d(A[1][2]) * d(A[2][1]))
        cofactor_matrix[0][1] = -((d(A[1][0]) * d(A[2][2])) - (d(A[1][2]) * d(A[2][0])))
        cofactor_matrix[0][2] = (d(A[1][0]) * d(A[2][1])) - (d(A[1][1]) * d(A[2][0]))
        cofactor_matrix[1][0] = -((d(A[0][1]) * d(A[2][2])) - (d(A[0][2]) * d(A[2][1])))
        cofactor_matrix[1][1] = (d(A[0][0]) * d(A[2][2])) - (d(A[0][2]) * d(A[2][0]))
        cofactor_matrix[1][2] = -((d(A[0][0]) * d(A[2][1])) - (d(A[0][1]) * d(A[2][0])))
        cofactor_matrix[2][0] = (d(A[0][1]) * d(A[1][2])) - (d(A[0][2]) * d(A[1][1]))
        cofactor_matrix[2][1] = -((d(A[0][0]) * d(A[1][2])) - (d(A[0][2]) * d(A[1][0])))
        cofactor_matrix[2][2] = (d(A[0][0]) * d(A[1][1])) - (d(A[0][1]) * d(A[1][0]))

        # Transpose the cofactor matrix (adjoint matrix).
        adjoint_matrix = [[cofactor_matrix[j][i] for j in range(3)] for i in range(3)]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix.
        inverse_matrix = [[value / d(determinant) for value in row] for row in adjoint_matrix]
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]

    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
| 210
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__(unittest.TestCase):
    """Fast (CPU-sized) pipeline tests for PNDMPipeline.

    NOTE(review): the property and the test method originally shared one
    mangled name (the method clobbered the property) and every local was
    assigned to a single mangled name while later lines read the real names
    (``pndm``, ``image`` …); restored per the internal reference
    ``self.dummy_uncond_unet`` and the upstream diffusers test.
    """

    @property
    def dummy_uncond_unet(self):
        """Return a tiny deterministic UNet2DModel for fast tests."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return unet

    def test_inference(self):
        """Dict and tuple outputs must match, and the slice must hit the
        regression values."""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='''numpy''').images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type='''numpy''', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Slow integration test for PNDMPipeline on the CIFAR-10 DDPM checkpoint.

    NOTE(review): locals were assigned to a single mangled name while later
    lines read ``pndm``/``image``/``image_slice``; restored from the upstream
    diffusers test. This class also shadows the identically named fast-test
    class above — confirm intended module layout.
    """

    def test_inference_cifar10(self):
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='''numpy''').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 210
| 1
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
def __init__( self , snake_case_=None , **snake_case_ ) -> int:
super().__init__(features=snake_case_ )
_UpperCAmelCase = torch_tensor_kwargs
import torch # noqa import torch at initialization
def __A ( self , snake_case_ ) -> Union[str, Any]:
import torch
if isinstance(snake_case_ , snake_case_ ) and column:
if all(
isinstance(snake_case_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(snake_case_ )
return column
def __A ( self , snake_case_ ) -> str:
import torch
if isinstance(snake_case_ , (str, bytes, type(snake_case_ )) ):
return value
elif isinstance(snake_case_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
_UpperCAmelCase = {"dtype": torch.intaa}
elif isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(snake_case_ )
return torch.tensor(snake_case_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def __A ( self , snake_case_ ) -> int:
import torch
# support for torch, tf, jax etc.
if hasattr(snake_case_ , "__array__" ) and not isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case_ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
elif isinstance(snake_case_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
return self._tensorize(snake_case_ )
def __A ( self , snake_case_ ) -> int:
return map_nested(self._recursive_tensorize , snake_case_ , map_list=snake_case_ )
def __A ( self , snake_case_ ) -> Mapping:
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(snake_case_ )
_UpperCAmelCase = self.python_features_decoder.decode_row(snake_case_ )
return self.recursive_tensorize(snake_case_ )
def __A ( self , snake_case_ ) -> "torch.Tensor":
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(snake_case_ )
_UpperCAmelCase = self.python_features_decoder.decode_column(snake_case_ , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(snake_case_ )
_UpperCAmelCase = self._consolidate(snake_case_ )
return column
def __A ( self , snake_case_ ) -> Mapping:
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(snake_case_ )
_UpperCAmelCase = self.python_features_decoder.decode_batch(snake_case_ )
_UpperCAmelCase = self.recursive_tensorize(snake_case_ )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
| 579
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# Map from canonical LUKE checkpoint names to their hosted config files.
# NOTE(review): this binding reuses the SAME mangled name as the logger above,
# clobbering it — a name-mangling artifact; confirm intended names upstream.
SCREAMING_SNAKE_CASE_ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class a(PretrainedConfig):
    """Configuration for LUKE models (language understanding with
    knowledge-based entity embeddings).

    NOTE(review): in the original every ``__init__`` parameter shared one
    mangled name (a SyntaxError) and each value was assigned to a throwaway
    local instead of ``self``; names restored from the upstream LukeConfig.
    The base class was an undefined mangled name — ``PretrainedConfig`` is
    the only config base imported here.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 579
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module logger.
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the SAME mangled name, so
# each assignment clobbers the previous one, and the tokenizer class further
# down reads names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) that
# are never defined here — a name-mangling artifact; confirm upstream.
# Expected vocab file name inside a checkpoint directory.
SCREAMING_SNAKE_CASE_ : List[str] = {'''vocab_file''': '''spiece.model'''}
# Map from canonical XLNet checkpoint names to their hosted vocab files.
SCREAMING_SNAKE_CASE_ : str = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# Positional-embedding size limits per checkpoint (None = unlimited).
SCREAMING_SNAKE_CASE_ : Dict = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
class snake_case_ ( a_ ):
    """SentencePiece-based XLNet tokenizer.

    NOTE(review): the obfuscated original bound every class attribute to one
    name, declared every method as ``UpperCAmelCase`` and every parameter as
    ``__lowerCamelCase`` (duplicate parameter names — SyntaxErrors).  The
    attribute, method and parameter names below are restored from the
    assignments and call sites visible in the block so the tokenizer plugs
    back into the `PreTrainedTokenizer` protocol.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"  # XLNet pads on the left

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model and register the special tokens."""
        # Mask token behaves like a normal word: it absorbs the space before it.
        # (lstrip=True/rstrip=False assumed from the upstream tokenizer — TODO confirm.)
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (added tokens excluded)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles that predate sp_model_kwargs
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text: collapse spaces, fix quotes, strip accents, lower-case."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            # NFKD then drop the combining marks.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """SentencePiece-tokenize, re-splitting pieces that end in 'digit,'."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id via the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) via the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces, mapping the SentencePiece underline back to spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        """Decode ids to text, keeping added tokens out of the SP decoder."""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: sequence A -> 0, sequence B -> 1, trailing <cls> -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from memory): dump the serialized proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 375
|
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> list:
lowerCAmelCase__ : List[Any] = len(__UpperCAmelCase )
for i in range(1 , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = collection[i]
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = i - 1
while low <= high:
lowerCAmelCase__ : str = (low + high) // 2
if val < collection[mid]:
lowerCAmelCase__ : List[Any] = mid - 1
else:
lowerCAmelCase__ : Optional[int] = mid + 1
for j in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
lowerCAmelCase__ : Dict = collection[j - 1]
lowerCAmelCase__ : Union[str, Any] = val
return collection
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 299
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Names of the torch-dynamo backends offered by the interactive
# `accelerate config` prompt (indexed by the user's menu choice).
# NOTE(review): later code in this file indexes a name `DYNAMO_BACKENDS`;
# here the list is bound to `lowercase` — the identifier was mangled.
lowercase = [
    '''EAGER''',
    '''AOT_EAGER''',
    '''INDUCTOR''',
    '''NVFUSER''',
    '''AOT_NVFUSER''',
    '''AOT_CUDAGRAPHS''',
    '''OFI''',
    '''FX2TRT''',
    '''ONNXRT''',
    '''IPEX''',
]
def __A(input_text, convert_value=None, default=None, error_message=None):
    """Prompt on stdin until the answer is empty (-> `default`) or converts cleanly.

    NOTE(review): the obfuscated original declared all four parameters with
    one identifier (duplicate parameter names — a SyntaxError); names are
    restored from how each value is used in the body.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            # An empty answer selects the default, when one was given.
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Conversion failed: explain (if possible) and re-prompt.
            if error_message is not None:
                print(error_message)
def __A(input_text, options=[], convert_value=None, default_choice=0):
    """Show an interactive BulletMenu and optionally convert the chosen option.

    NOTE(review): parameter names restored — the obfuscated original declared
    all four with one identifier (a SyntaxError).  The mutable default for
    `options` is kept for interface compatibility; it is never mutated here.
    """
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def __A(_SCREAMING_SNAKE_CASE):
    """Map a numeric menu choice onto a ComputeEnvironment member."""
    index = int(_SCREAMING_SNAKE_CASE)
    environments = ["LOCAL_MACHINE", "AMAZON_SAGEMAKER"]
    return ComputeEnvironment(environments[index])
def __A(_SCREAMING_SNAKE_CASE):
    """Map a numeric menu choice onto a DistributedType member."""
    options = ["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"]
    return DistributedType(options[int(_SCREAMING_SNAKE_CASE)])
def __A(_SCREAMING_SNAKE_CASE):
    """Map a numeric menu choice onto a DynamoBackend value.

    NOTE(review): the original indexed the undefined name DYNAMO_BACKENDS;
    the backend list in this module is bound to `lowercase`.
    """
    index = int(_SCREAMING_SNAKE_CASE)
    return DynamoBackend(lowercase[index]).value
def __A(_SCREAMING_SNAKE_CASE):
    """Map a numeric menu choice onto a PrecisionType member."""
    precisions = ["no", "fp16", "bf16", "fp8"]
    return PrecisionType(precisions[int(_SCREAMING_SNAKE_CASE)])
def __A(_SCREAMING_SNAKE_CASE):
    """Map a numeric menu choice onto a SageMakerDistributedType member."""
    modes = ["NO", "DATA_PARALLEL", "MODEL_PARALLEL"]
    return SageMakerDistributedType(modes[int(_SCREAMING_SNAKE_CASE)])
def __A ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class __lowerCamelCase ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def a_ ( self , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Dict = super()._format_usage(a__ , a__ , a__ , a__ )
__SCREAMING_SNAKE_CASE : Dict = usage.replace("<command> [<args>] " , "" )
return usage
| 564
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# T5 tokenizer module constants.
# NOTE(review): every constant below is (re)bound to the same obfuscated name
# `lowercase`, so only the last assignment survives at runtime — the
# identifiers were mangled; verify against the upstream module.
lowercase = logging.get_logger(__name__)
# Name of the SentencePiece model file expected inside a checkpoint directory.
lowercase = {'''vocab_file''': '''spiece.model'''}
# Download URLs of the pretrained SentencePiece models.
lowercase = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# TODO(PVP) - this should be removed in Transformers v5
# Legacy per-checkpoint max input lengths.
lowercase = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
# The SentencePiece whitespace marker.
lowercase = '''▁'''
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
    """SentencePiece-based T5 tokenizer with `<extra_id_N>` sentinel support.

    NOTE(review): the obfuscated original declared every method as ``a_`` and
    every parameter as ``a__`` (duplicate parameter names — SyntaxErrors),
    and referenced the undefined name ``TaTokenizer``.  Attribute, method and
    parameter names are restored from the assignments and call sites visible
    in the block so the tokenizer plugs back into the `PreTrainedTokenizer`
    protocol.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        """Load the SentencePiece model and register the sentinel tokens."""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Backwards-compatibility shim for model_max_length (removed in v5)."""
        if pretrained_model_name_or_path in __lowerCamelCase.max_model_input_sizes:
            deprecated_max_model_length = __lowerCamelCase.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    # NOTE(review): warning category assumed FutureWarning — the
                    # obfuscated call passed an opaque argument here; confirm upstream.
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        """SentencePiece vocab plus the appended sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """Return the `<extra_id_N>` tokens registered as additional specials."""
        return list(
            set(filter(lambda x: bool(re.search(R"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return ids of the sentinel tokens."""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Append `</s>` unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token types: everything is segment 0."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 format: ``A </s>`` or ``A </s> B </s>``."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles that predate sp_model_kwargs
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """SentencePiece-tokenize; in non-legacy mode undo the leading marker."""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Sentinel tokens map to the top of the vocab; others go through SP."""
        if token.startswith("<extra_id_"):
            match = re.match(R"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Ids above the SP vocab are sentinels; others go through SP."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode pieces, keeping special tokens out of the SP decoder."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from memory): dump the serialized proto.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 564
| 1
|
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int ) -> str:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = int(_UpperCamelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(_UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(_UpperCamelCase , 2 )
return binary_recursive(_UpperCamelCase ) + str(_UpperCamelCase )
def lowerCamelCase(_UpperCamelCase: str) -> str:
    """Convert an integer string (optionally negative) to its '0b'-prefixed binary form.

    Raises:
        ValueError: if the input is empty or not an integer.
    """

    def _to_binary(decimal: int) -> str:
        # Recursive binary expansion.  NOTE(review): the original called the
        # undefined global `binary_recursive`; the helper is inlined so this
        # function is self-contained.
        if decimal in (0, 1):
            return str(decimal)
        div, mod = divmod(decimal, 2)
        return _to_binary(div) + str(mod)

    number = str(_UpperCamelCase).strip()
    if not number:
        raise ValueError("""No input value was provided""")
    negative = """-""" if number.startswith("""-""") else """"""
    number = number.lstrip("""-""")
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""")
    return f'''{negative}0b{_to_binary(int(number))}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 139
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Detect optional torch support and compute cache / data file locations.
# NOTE(review): every constant below is (re)bound to the same obfuscated name
# `UpperCAmelCase`, so only the last assignment survives, and the references
# to `torch_cache_home`, `PATH`, `default_cache_path`,
# `PYTORCH_PRETRAINED_BERT_CACHE` and `PYTORCH_TRANSFORMERS_CACHE` are
# undefined — the identifiers were mangled; verify against upstream.
try:
    import torch

    UpperCAmelCase : Optional[Any] = True  # torch is importable
except ImportError:
    UpperCAmelCase : Optional[int] = False
try:
    from torch.hub import _get_torch_home

    UpperCAmelCase : Union[str, Any] = _get_torch_home()
except ImportError:
    # Fall back to the conventional cache location when torch.hub is absent.
    UpperCAmelCase : Dict = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
UpperCAmelCase : Any = os.path.join(torch_cache_home, 'transformers')
# Model download endpoints.
UpperCAmelCase : int = 'https://cdn.huggingface.co'
UpperCAmelCase : Dict = 'https://s3.amazonaws.com/models.huggingface.co/bert'
# Directory containing this file, and the data files shipped next to it.
UpperCAmelCase : str = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
UpperCAmelCase : List[str] = os.path.join(PATH, 'config.yaml')
UpperCAmelCase : int = os.path.join(PATH, 'attributes.txt')
UpperCAmelCase : Dict = os.path.join(PATH, 'objects.txt')
# Cache directories, overridable via environment variables.
UpperCAmelCase : str = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
UpperCAmelCase : List[str] = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
UpperCAmelCase : Optional[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
# Standard file names.
UpperCAmelCase : Tuple = 'pytorch_model.bin'
UpperCAmelCase : List[Any] = 'config.yaml'
def lowerCamelCase(objs=OBJECTS, attrs=ATTRIBUTES):
    """Load Visual-Genome class and attribute label names.

    Each file is expected to hold one comma-separated record per line; only
    the first field (lower-cased, stripped) is kept.

    NOTE(review): parameter names restored — the obfuscated original declared
    both parameters with one identifier (a SyntaxError).  Returns
    ``(vg_classes, vg_attrs)``.
    """
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():  # renamed from `object` (shadowed a builtin)
            vg_classes.append(line.split(""",""")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(""",""")[0].lower().strip())
    return vg_classes, vg_attrs
def lowerCamelCase(ckp_path):
    """Load a pickled checkpoint and return its `"model"` dict as an OrderedDict
    of torch tensors (numpy arrays are converted; anything else must already
    be a tensor)."""
    r = OrderedDict()
    with open(ckp_path, """rb""") as f:
        ckp = pkl.load(f)["""model"""]
    # Iterate over a snapshot of the keys because entries are popped below.
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # BUG FIX: the original checked isinstance(v, torch.tensor) —
            # torch.tensor is a factory *function*, so isinstance raised
            # TypeError; the tensor type is torch.Tensor.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class lowerCamelCase__ :
    """Attribute-style nested configuration built from dicts / YAML files.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (SyntaxErrors) and referenced the undefined class name ``Config``;
    internal references now use this class's actual name.
    """

    # Class-level fallback for instances before __init__ assigns their own.
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        """Recursively wrap `dictionary`; nested dicts become nested configs."""
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = lowerCamelCase__(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        """Set both the attribute and the mirror entry in `_pointer`,
        supporting dotted keys like ``a.b.c``."""
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), lowerCamelCase__):
                    # Delegate the remaining path to the nested config.
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        """Return the underlying (possibly nested) mapping."""
        return self._pointer

    def dump_yaml(self, data, file_name):
        """Serialize `data` to `file_name` as YAML."""
        with open(f'''{file_name}''', """w""") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        """Serialize `data` to `file_name` as JSON."""
        with open(f'''{file_name}''', """w""") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        """Parse a YAML file and return the raw data."""
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = """ """  # per-level indent unit (kept as in the original)
        if self._name != "root":
            r = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = """"""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, lowerCamelCase__):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(v).__name__})\n'''
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        """Build a config from a local path, file, or remote model id."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        """Resolve and parse the YAML config for a model id / path / URL."""
        cache_dir = kwargs.pop("""cache_dir""", None)
        force_download = kwargs.pop("""force_download""", False)
        resume_download = kwargs.pop("""resume_download""", False)
        proxies = kwargs.pop("""proxies""", None)
        local_files_only = kwargs.pop("""local_files_only""", False)
        if os.path.isdir(pretrained_model_name_or_path):
            # NOTE(review): the config file name constant was mangled in this
            # module; using the literal defined at module scope ('config.yaml').
            config_file = os.path.join(pretrained_model_name_or_path, "config.yaml")
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename="config.yaml", use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = lowerCamelCase__.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = """Can't load config for"""
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("""loading configuration file from path""")
        else:
            print("""loading configuration file cache""")
        return lowerCamelCase__.load_yaml(resolved_config_file), kwargs
def lowerCamelCase(_UpperCamelCase):
    """Debug helper: compare `_UpperCamelCase` against the tensor saved in
    'dump.pt', print both previews, assert closeness, then raise to halt."""
    reference = torch.load("""dump.pt""", map_location=_UpperCamelCase.device)
    actual = _UpperCamelCase.numpy()
    expected = reference.numpy()[0]
    print(actual.shape, actual[0, 0, :5])
    print(expected.shape, expected[0, 0, :5])
    # The mismatch percentage in the message is only computed on failure.
    assert np.allclose(actual, expected, rtol=0.01, atol=0.1), (
        f'''{sum([1 for x in np.isclose(actual , expected , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(actual.flatten() )*1_0_0:.4f} %'''
        " element-wise mismatch"
    )
    # Intentionally raises on success as well, to stop the surrounding run.
    raise Exception("""tensors are all good""")
# Hugging face functions below
def lowerCamelCase(_UpperCamelCase: str) -> bool:
    """Return True when the string parses as an http(s) URL rather than a local path."""
    return urlparse(_UpperCamelCase).scheme in ("http", "https")
def lowerCamelCase(model_id: str, filename: str, use_cdn=True) -> str:
    """Build the download URL for `filename` of `model_id`.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and formatted the literal '(unknown)' where the filename
    belongs — restored to interpolate `filename` (names taken from the
    keyword call site `hf_bucket_url(..., filename=..., use_cdn=...)`
    elsewhere in this file).
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    # Legacy ids have no namespace separator: '<model>-<file>' flat layout.
    legacy_format = """/""" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
def lowerCamelCase(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream-download `url` into the open binary file `temp_file`, with a
    tqdm progress bar and HTTP Range-based resume support.

    NOTE(review): parameter names restored — the obfuscated original declared
    all five parameters with one identifier (a SyntaxError).
    """
    ua = """python/{}""".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("""{}/{}""".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        # Request only the remaining bytes.  NOTE(review): header key assumed
        # to be 'Range' from the value format — confirm against upstream.
        headers["Range"] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 4_1_6:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="""B""", unit_scale=True, total=total, initial=resume_size, desc="""Downloading""",
    )
    for chunk in response.iter_content(chunk_size=1_0_2_4):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase ( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=1_0 , resume_download=False , user_agent=None , local_files_only=False , ):
    """Download ``url`` into the local cache (if needed) and return the cached path.

    Uses the remote ETag to version cache entries, a ``.lock`` file to prevent
    parallel downloads of the same file, and an optional ``.incomplete`` file to
    resume interrupted downloads. Fixes the original's duplicated parameter
    names and the unbound locals (``etag``, ``filename``, ``cache_path``, ...).

    Returns:
        Path to the cached file, or None when offline and nothing is cached.

    Raises:
        ValueError: when ``local_files_only`` is set and no cached file exists.
    """
    if cache_dir is None:
        # TRANSFORMERS_CACHE is a module-level constant defined outside this block.
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 2_0_0:
                etag = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + """.lock"""
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + """.incomplete"""

            @contextmanager
            def _resumable_file_manager():
                # Append mode so a partial download keeps its existing bytes.
                with open(incomplete_path , """a+b""" ) as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        # Atomic rename into the final cache location.
        os.replace(temp_file.name , cache_path )
        meta = {"""url""": url, """etag""": etag}
        meta_path = cache_path + """.json"""
        with open(meta_path , """w""" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def lowerCamelCase ( url : str , etag=None ) -> str:
    """Convert a URL (and optional ETag) into a deterministic cache filename.

    The name is ``sha256(url)`` hex, plus ``.sha256(etag)`` hex when an ETag is
    given, plus a ``.h5`` suffix for HDF5 URLs so type detection keeps working.
    Fixes the original's unbound locals (``url_hash``, ``filename``, ``etag_hash``).
    """
    # Local import: the module-level hash alias used by the original file is not
    # visible from this block; the call pattern matches hashlib's digest API.
    from hashlib import sha256

    url_bytes = url.encode("""utf-8""" )
    filename = sha256(url_bytes ).hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""" )
        filename += "." + sha256(etag_bytes ).hexdigest()
    if url.endswith(""".h5""" ):
        filename += ".h5"
    return filename
def lowerCamelCase ( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    """Resolve ``url_or_filename`` to a local path, downloading and/or extracting it.

    Remote URLs are fetched through the cache; local paths are returned as-is.
    With ``extract_compressed_file`` a zip/tar archive is unpacked into a
    sibling ``*-extracted`` directory (guarded by a ``.lock``). Fixes the
    original's duplicated parameter names and unbound locals.

    Raises:
        EnvironmentError: missing local file or unidentifiable archive format.
        ValueError: the argument is neither a URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , """r""" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
        return output_path_extracted
    return output_path
def lowerCamelCase ( query , delim="," ):
    """Load data from a local file path or a URL.

    Local files are ``eval``-ed (they are expected to contain a Python literal);
    URLs are fetched and parsed as JSON, falling back to ``eval`` of the decoded
    body and finally to splitting it on newlines.

    Fixes: the JSON branch called ``requests.json()`` (the module has no such
    attribute, so it always fell through to the fallback) — it now calls
    ``req.json()`` on the response. ``delim`` is kept for interface
    compatibility although it is unused here.

    SECURITY NOTE: ``eval`` on file/network content executes arbitrary code;
    only use this on trusted sources (flagged, intentionally not replaced).
    """
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )  # noqa: S307 — trusted local fixture files only
    else:
        req = requests.get(query )
        try:
            data = req.json()  # was: requests.json() — AttributeError every time
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )  # noqa: S307 — see security note above
            except Exception:
                data = data.split("""\n""" )
        req.close()
    return data
def lowerCamelCase ( url : str ):
    """Fetch an image over HTTP and decode it into a numpy array.

    Fixes the original's unbound local: the response was assigned to a junk
    name but read as ``response``.

    Returns:
        numpy array as produced by ``np.array(PIL.Image)`` — presumably
        (H, W, C) uint8 for RGB images; depends on the source image mode.
    """
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def lowerCamelCase ( url : str ):
    """Download (if absent) a pickled Detectron-style checkpoint and convert it.

    Returns a dict of torch tensors built from the numpy weights; every
    ``running_var`` entry additionally gets a zero ``num_batches_tracked``
    companion, as PyTorch BatchNorm state dicts expect.

    Fixes the original's unbound locals (``fn``, ``weights``, ``model``,
    ``new``, ``zero``, and the per-key tensor assignment).

    SECURITY NOTE: ``pkl.load`` on downloaded content executes arbitrary code;
    only use with trusted URLs (flagged, intentionally not replaced).
    """
    fn = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k_tracked = k.replace("""running_var""" , """num_batches_tracked""" )
            new[k_tracked] = zero
    return new
def lowerCamelCase ( ) -> None:
    """Print the absolute path of the demo notebook next to this package.

    NOTE(review): the original f-string referenced an undefined name where the
    base directory belongs; this assumes the intent was "parent directory of
    this module" — confirm against the upstream source.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__ ) )
    print(f'''{os.path.abspath(os.path.join(base_dir , os.pardir ) )}/demo.ipynb''' )
def lowerCamelCase ( im , input_format="RGB" ):
    """Load an image from a local path or URL and normalize its channel order.

    Fixes the original's duplicated parameter names and unbound locals
    (``im``, ``img`` were read but never bound).

    Args:
        im: local file path or URL of the image.
        input_format: "RGB" flips the channel axis after the BGR->RGB
            conversion (net effect: BGR order, matching the original logic —
            NOTE(review): double inversion looks intentional for the consuming
            model; confirm upstream).
    """
    assert isinstance(im , str )
    if os.path.isfile(im ):
        # cva is the module-level cv2 alias imported elsewhere in the file.
        img = cva.imread(im )
    else:
        img = get_image_from_url(im )
    assert img is not None, f'''could not connect to: {im}'''
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def lowerCamelCase ( images , batch : int = 1 ):
    """Yield successive ``batch``-sized slices of ``images`` (lazy generator).

    Fixes the original's duplicated parameter names, which made the body's
    ``images``/``batch`` references unbound.
    """
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 139
| 1
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCAmelCase__ (
    search_prob,
    find_max = True,
    max_x = math.inf,
    min_x = -math.inf,
    max_y = math.inf,
    min_y = -math.inf,
    visualization = False,
    start_temperate = 100,
    rate_of_decrease = 0.01,
    threshold_temp = 1,
):
    """Simulated annealing over a SearchProblem-like state.

    Repeatedly picks a random in-bounds neighbor; improving moves are always
    taken, worsening moves are taken with probability e^(change/T). The
    temperature T decays geometrically by ``rate_of_decrease`` each outer
    iteration until it drops below ``threshold_temp`` or no neighbor is usable.

    Fixes the original's duplicated parameter names (SyntaxError) and unbound
    locals (``search_end``, ``current_state``, ``current_temp``, ...).

    Args:
        search_prob: state object exposing ``score()``, ``get_neighbors()``
            and ``x``/``y`` coordinates.
        find_max: maximize the score instead of minimizing it.
        max_x/min_x/max_y/min_y: rectangular bounds on accepted neighbors.
        visualization: plot score-per-iteration with matplotlib at the end.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: stop once the temperature falls below this.

    Returns:
        The best-scoring state seen during the search.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    # Demo: run simulated annealing against two toy objective functions.
    # NOTE(review): the annealing routine in this file is (obfuscatedly) named
    # lowerCAmelCase__, yet the calls below use `simulated_annealing` as in the
    # upstream source — confirm the intended public name.

    def test_fa(x, y):
        """Paraboloid f(x, y) = x^2 + y^2 (global minimum at the origin)."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_fa_a(x, y):
        """Saddle-ish objective f(x, y) = 3x^2 - 6y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa_a)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa_a)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
| 704
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Builds tiny Swinv2 configs/inputs and runs the shared model checks.

    NOTE(review): the test-suite class below instantiates this helper as
    ``SwinvaModelTester`` — the obfuscated class name ``A__`` here does not
    match; confirm the intended class name upstream.

    Fixes vs. the original block: the ``__init__`` signature repeated a single
    parameter name ``a`` (SyntaxError) — parameters are restored from the body's
    attribute assignments; method names are restored from their call sites
    (every method was named ``_lowerCamelCase``, so later defs shadowed earlier
    ones); ``get_config`` passed the unknown kwarg ``path_norm``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a small config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a SwinvaConfig mirroring this tester's hyper-parameters."""
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # fixed: was passed as the unknown kwarg `path_norm`
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model forward: check last_hidden_state shape."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence shrinks by 4x per stage after the first; width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Masked-image-modeling head: reconstruction logits match input size."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head: logits are (batch, num_labels)."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """unittest suite for Swinv2 models.

    Fixes vs. the original block: the class listed a duplicated, undefined base
    ``__magic_name__`` twice (restored to the mixins imported at the top of the
    file); every class attribute was named ``lowercase`` and every method
    ``_lowerCamelCase``, so later definitions shadowed earlier ones — names are
    restored from the upstream transformers test conventions; duplicated ``a``
    parameters (SyntaxError) are given real names taken from their use sites.

    NOTE(review): ``setUp`` refers to ``SwinvaModelTester``, which is named
    ``A__`` in this obfuscated file — confirm the intended class name.
    """

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, 'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Shared checker for (reshaped_)hidden_states shapes at a given image size."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        # Image dims are padded up to the next multiple of the patch size.
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration test: run the real swinv2-tiny checkpoint on a COCO image.

    Fixes vs. the original block: both methods were named ``_lowerCamelCase``
    (the second shadowed the first); names are restored from their use sites
    (``self.default_image_processor``) and upstream conventions.
    """

    @cached_property
    def default_image_processor(self):
        # None when torchvision/PIL stack is unavailable; the test is gated anyway.
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 69
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
def lowerCamelCase ( inductance : float , frequency : float , reactance : float ):
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for the zero argument.

    Exactly one of the three quantities must be 0; it is computed from the
    other two and returned in a single-entry dict keyed by its name.

    Fixes the original's parameter list, which repeated one name three times
    (SyntaxError) while the body read ``inductance``/``frequency``/``reactance``.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if inductance < 0:
        raise ValueError("""Inductance cannot be negative""")
    if frequency < 0:
        raise ValueError("""Frequency cannot be negative""")
    if reactance < 0:
        raise ValueError("""Inductive reactance cannot be negative""")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
| 665
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ):
    """Prompt for the compute environment and collect the matching accelerate config.

    Fixes the original's unbound locals: the prompt result was assigned to a
    junk name but read as ``compute_environment`` (and likewise ``config``).

    Returns:
        The config object produced by the SageMaker or cluster questionnaire.
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a__ ( subparsers=None ):
    """Build the argparse parser for `accelerate config`.

    Fixes the original's unbound ``parser`` local and the ``description=``
    argument, which mistakenly passed the ``subparsers`` parameter instead of
    the module-level description string (UpperCamelCase__, defined above).

    Args:
        subparsers: optional subparsers action to attach to; standalone parser
            when None.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''config''', description=UpperCamelCase__)
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''', description=UpperCamelCase__)
    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ), )
    if subparsers is not None:
        # NOTE(review): the handler is named `a__` in this obfuscated file;
        # upstream the function below is `config_command` — confirm the name.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the questionnaire and write the resulting config to disk.

    Saves to ``args.config_file`` when given, otherwise to the default YAML
    file in the accelerate cache directory (created if missing). JSON vs YAML
    is decided by the target filename's extension.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    """Entry point: parse CLI args and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 75
| 0
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a Flax parameter key tuple (and reorient its tensor) for PyTorch.

    - 3-D ``kernel`` (expert layer): rename to ``weight`` and permute to (0, 2, 1).
    - other ``kernel`` (linear layer): rename to ``weight`` and transpose.
    - ``scale`` / ``embedding``: rename to ``weight`` without touching the data.

    Returns the (possibly renamed) key tuple and tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key, content).

    ``layer`` is a '/'-joined key from the flattened t5x checkpoint dict; the
    part before 'metadata'/'kvstore' identifies the layer, the rest is the
    tensorstore spec entry for it.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing '/'
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # Rebase the stored path onto the local checkpoint directory.
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename a shard's keys via ``rename_keys`` and save it with torch.

    Flax uses '/'-separated module paths; PyTorch state dicts use '.' — so the
    separator is rewritten before saving.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream a t5x Switch checkpoint into PyTorch shards without loading it all.

    Reads each layer through tensorstore, converts it to a torch tensor, and
    starts a new shard whenever ``max_shard_size`` would be exceeded. Writes a
    ``WEIGHTS_INDEX_NAME`` index mapping each weight to its shard file.

    Returns ``(metadata, index)`` — or ``({weights_name: keys}, None)`` when a
    single shard suffices.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    # Group the flattened tensorstore spec entries by their real layer name.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        # Now that the shard count is known, rename the placeholder files.
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__A = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Round-trip a converted Switch checkpoint through `generate` as a smoke test.

    Never called by the script; kept as a manual debugging helper with
    hard-coded local paths.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 720
|
"""simple docstring"""
class FlowNetwork:
    """A flow network over an adjacency-matrix graph.

    Multiple sources/sinks are normalized to a single fake super-source and
    super-sink so any single-source max-flow algorithm can run on it.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Collapse multiple sources/sinks into one fake source/sink vertex."""
        if isinstance(sources, int):  # accept a bare vertex index
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            # Capacity large enough to never be the bottleneck.
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # Prepend a super-source connected to every real source.
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            # Append a super-sink reachable from every real sink.
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        # `algorithm` is an executor class; instantiate it bound to this network.
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base executor: caches network views and runs `_algorithm` once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        # Implemented by concrete subclasses.
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor whose result is a single maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push–relabel maximum-flow with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        # Push to admissible neighbours until the excess is gone, relabeling as needed.
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        # Push as much excess as the residual capacity allows.
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        # Lift the vertex just above its lowest admissible neighbour.
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__A = [0]
__A = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__A = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__A = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__A = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 560
| 0
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of items from `array` summing to `target`.

    Plain recursion (exponential time); `n` is len(array), kept for interface
    parity with the memoized and bottom-up variants.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # the empty combination
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as `combination_sum_iv`, memoized top-down on `target`."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, bottom-up: dp_array[i] = number of ways to reach sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # empty combination reaches sum 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 33
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    """Task template for language modeling over a single text column."""

    # Task identifier; always serialized even when left at its default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured text column onto the canonical "text" key."""
        return {self.text_column: "text"}
| 3
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for whichever of inductance, frequency or inductive reactance is 0.

    Uses X_L = 2*pi*f*L. Exactly one argument must be 0; the returned dict
    maps that quantity's name to its computed value.

    Raises ValueError when zero-count != 1 or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 72
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models only become importable when torch itself is installed.
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 72
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dot-separated path.

    Positive ``n_shave_prefix_segments`` drops that many leading segments;
    negative keeps everything except that many trailing segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Build old->new key mappings for resnet layers (LDM names -> diffusers names)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Build old->new key mappings for attention layers (LDM names -> diffusers names)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Copy weights from `old_checkpoint` into `checkpoint` under renamed keys.

    `paths` is a list of {"old": ..., "new": ...} mappings. Fused qkv tensors
    listed in `attention_paths_to_split` are split into separate query/key/value
    entries first (requires `config["num_head_channels"]`).
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict to the diffusers UNet layout.

    `config` must contain at least `num_res_blocks` (and `num_head_channels`
    when fused qkv tensors need splitting). Returns the renamed state dict.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            # A bare `op` entry is a downsampling conv, not a resnet.
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group the (already shaved) layer names by their sub-block id.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase = json.loads(f.read())
UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCamelCase = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 520
|
from __future__ import annotations
def pigeon_sort(array):
    """Sort a list of integers in place with pigeonhole sort and return it.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables: one hole per distinct value in [_min, _max].
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 605
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Dict=13 , __lowerCamelCase : int=30 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : List[str]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Union[str, Any]=2 , ) -> str:
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scope
lowerCamelCase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = num_patches + 1
def a__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def a__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__ ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase__ = ViTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = ViTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = ViTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = self.type_sequence_label_size
lowerCamelCase__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ = 1
lowerCamelCase__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : List[str] ) -> Any:
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def a__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = ViTModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def a__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def a__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def a__ ( self : Tuple ) -> int:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def a__ ( self : Tuple ) -> str:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(__lowerCamelCase )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def a__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def a__ ( self : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def a__ ( self : Any ) -> str:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCamelCase_ ( ):
lowerCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def a__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__lowerCamelCase )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**__lowerCamelCase )
# verify the logits
lowerCamelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def a__ ( self : Any ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__lowerCamelCase )
lowerCamelCase__ = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
lowerCamelCase__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(__lowerCamelCase , interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
lowerCamelCase__ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
lowerCamelCase__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCamelCase__ = model(__lowerCamelCase )
| 187
|
'''simple docstring'''
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float(moles / volume) * nfactor)
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((moles * 0.0_821 * temperature) / (volume)))
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((moles * 0.0_821 * temperature) / (pressure)))
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((pressure * volume) / (0.0_821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187
| 1
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def SCREAMING_SNAKE_CASE__ ( _lowercase : List[str] ) -> Optional[int]:
'''simple docstring'''
if hor == 128:
lowercase__ : Any = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowercase__ : str = (32, 128, 256)
lowercase__ : int = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowercase__ : Optional[Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowercase__ : Dict = (32, 64, 128, 256)
lowercase__ : List[str] = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowercase__ : Any = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
lowercase__ : Tuple = model.state_dict()
lowercase__ : Union[str, Any] = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowercase__ : Optional[int] = UNetaDModel(**_lowercase )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowercase__ : Union[str, Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase__ : Tuple = state_dict.pop(_lowercase )
hf_value_function.load_state_dict(_lowercase )
torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , 'w' ) as f:
json.dump(_lowercase , _lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : int = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowercase__ : str = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowercase__ : List[Any] = model
lowercase__ : Optional[Any] = UNetaDModel(**_lowercase )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowercase__ : Tuple = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase__ : Union[str, Any] = state_dict.pop(_lowercase )
hf_value_function.load_state_dict(_lowercase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(_lowercase , _lowercase )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 266
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Any, lowerCamelCase_: Optional[Any], lowerCamelCase_: List[str]=13, lowerCamelCase_: Optional[Any]=7, lowerCamelCase_: Optional[Any]=True, lowerCamelCase_: Tuple=True, lowerCamelCase_: Any=False, lowerCamelCase_: Union[str, Any]=True, lowerCamelCase_: Optional[Any]=99, lowerCamelCase_: Tuple=32, lowerCamelCase_: Any=5, lowerCamelCase_: Tuple=4, lowerCamelCase_: List[Any]=37, lowerCamelCase_: Union[str, Any]="gelu", lowerCamelCase_: str=0.1, lowerCamelCase_: Union[str, Any]=0.1, lowerCamelCase_: Any=512, lowerCamelCase_: Union[str, Any]=16, lowerCamelCase_: Any=2, lowerCamelCase_: str=0.0_2, lowerCamelCase_: Union[str, Any]=3, lowerCamelCase_: List[str]=4, lowerCamelCase_: Tuple=None, ):
lowercase__ : List[str] = parent
lowercase__ : str = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : List[str] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : str = use_labels
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Dict = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Tuple = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : List[Any] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Optional[int] = num_choices
lowercase__ : Dict = scope
def snake_case__( self: Union[str, Any] ):
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ : List[Any] = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any] = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ : str = None
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size], self.num_choices )
lowercase__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self: Tuple ):
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, use_stable_embedding=lowerCamelCase_, )
def snake_case__( self: Optional[int], lowerCamelCase_: Optional[int], lowerCamelCase_: List[Any], lowerCamelCase_: List[str], lowerCamelCase_: Optional[int], lowerCamelCase_: Dict, lowerCamelCase_: Optional[int], lowerCamelCase_: str ):
lowercase__ : Union[str, Any] = OpenLlamaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Union[str, Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowercase__ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: str, lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Tuple, lowerCamelCase_: Optional[Any], lowerCamelCase_: List[str], lowerCamelCase_: Dict, lowerCamelCase_: Union[str, Any], ):
lowercase__ : Tuple = True
lowercase__ : int = OpenLlamaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, )
lowercase__ : Optional[int] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, )
lowercase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self: List[Any], lowerCamelCase_: Optional[Any], lowerCamelCase_: Any, lowerCamelCase_: str, lowerCamelCase_: List[str], lowerCamelCase_: Any, lowerCamelCase_: Dict, lowerCamelCase_: int, lowerCamelCase_: Any, lowerCamelCase_: str, ):
lowercase__ : Optional[Any] = OpenLlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self: Tuple, lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Optional[int], lowerCamelCase_: str, lowerCamelCase_: List[Any], lowerCamelCase_: List[Any], lowerCamelCase_: str, lowerCamelCase_: Optional[Any], lowerCamelCase_: Union[str, Any], ):
lowercase__ : Optional[int] = True
lowercase__ : Optional[int] = True
lowercase__ : Tuple = OpenLlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowercase__ : Any = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_, )
lowercase__ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
lowercase__ : str = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowercase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowercase__ : List[str] = torch.cat([input_mask, next_mask], dim=-1 )
lowercase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
lowercase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, encoder_hidden_states=lowerCamelCase_, encoder_attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowercase__ : Tuple = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowercase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-3 ) )
def snake_case__( self: Optional[Any] ):
lowercase__ : Dict = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : List[str] = config_and_inputs
lowercase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
_A = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def snake_case__( self: Any ):
lowercase__ : List[Any] = OpenLlamaModelTester(self )
lowercase__ : Dict = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=37 )
def snake_case__( self: Any ):
self.config_tester.run_common_tests()
def snake_case__( self: Dict ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def snake_case__( self: str ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : Dict = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def snake_case__( self: Any ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = 3
lowercase__ : Union[str, Any] = input_dict['input_ids']
lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : int = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowercase__ : Optional[Any] = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self: Any ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = 3
lowercase__ : Optional[Any] = 'single_label_classification'
lowercase__ : Union[str, Any] = input_dict['input_ids']
lowercase__ : str = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : Any = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
lowercase__ : Dict = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__( self: Union[str, Any] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = 3
lowercase__ : List[Any] = 'multi_label_classification'
lowercase__ : Any = input_dict['input_ids']
lowercase__ : int = input_ids.ne(1 ).to(lowerCamelCase_ )
lowercase__ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[int] = OpenLlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def snake_case__( self: List[str] ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__( self: Any, lowerCamelCase_: List[Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = ids_tensor([1, 10], config.vocab_size )
lowercase__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : str = OpenLlamaModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowercase__ : Union[str, Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowercase__ : List[str] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : List[str] = {'type': scaling_type, 'factor': 1_0.0}
lowercase__ : Union[str, Any] = OpenLlamaModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowercase__ : Dict = scaled_model(lowerCamelCase_ ).last_hidden_state
lowercase__ : Union[str, Any] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1E-5 ) )
| 266
| 1
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
snake_case_ : int = "path-to-your-trained-model"
snake_case_ : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
snake_case_ : int = "A photo of sks dog in a bucket"
snake_case_ : Optional[Any] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 644
|
'''simple docstring'''
class __a :
def __init__( self : List[Any] , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = size
UpperCAmelCase_ : Tuple = [0] * size
UpperCAmelCase_ : Optional[Any] = [0] * size
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( __magic_name__ : int ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : int = value
while index < self.size:
UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1
if current_left_border == index:
UpperCAmelCase_ : List[str] = value
else:
UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ )
def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
right -= 1 # Because of right is exclusive
UpperCAmelCase_ : List[str] = 0
while left <= right:
UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ )
if left <= current_left:
UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] )
UpperCAmelCase_ : Optional[Any] = current_left
else:
UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : List[Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class _A ( _UpperCAmelCase ):
    """Configuration for the MGP-STR scene-text-recognition model.

    Stores the hyper-parameters of the vision-transformer backbone and the
    character / BPE / wordpiece recognition heads.

    NOTE(review): the original ``__init__`` declared every argument with the
    same placeholder name — a SyntaxError in Python. The names below are
    restored from the attribute each argument is assigned to.
    """

    # Registry key read by the `PretrainedConfig` machinery.
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],  # input image (height, width)
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50_257,
        num_wordpiece_labels=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,  # also return the A^3 module attentions
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RemBERT package: heavy submodules are only
# imported on first attribute access, and optional backends (sentencepiece,
# tokenizers, torch, tf) are skipped when unavailable.
# Bug fix: `_import_structure` was referenced by `_LazyModule(...)` below but
# never defined (the dict and its updates were bound to throwaway names),
# which raised NameError at import time.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy (why `import sys` exists above).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
| 1
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase_(fn):
    """Decorator that flags *fn* as experimental.

    Every call to the wrapped function emits a ``UserWarning`` before
    delegating to *fn* unchanged.

    Bug fix: the original inner signature ``*A__, **A__`` reused one name
    (a SyntaxError), the body referenced an undefined ``fn``, and the
    function itself was passed to ``warnings.warn`` as the warning category.
    """

    @wraps(fn)  # preserve fn's name/docstring on the wrapper
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 511
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ =logging.get_logger(__name__)
def make_batched(videos):
    """Coerce *videos* into a batch: a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos, and wraps
    as needed. Raises ``ValueError`` for anything else.

    Bug fix: the original mixed the placeholder parameter name with the real
    local name ``videos`` (NameError) and was itself misnamed — the
    processor below calls it as ``make_batched``.
    """
    # Already batched: list of videos, each a list/tuple of valid frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    # A single video (list/tuple of frames): wrap into a batch of one.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    # A single frame: wrap into a one-frame, one-video batch.
    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class a_ ( BaseImageProcessor ):
    """Image processor for video models.

    Per frame: optional resize -> center-crop -> rescale -> normalize, then
    stacking into ``pixel_values``.

    NOTE(review): all six methods originally shared one placeholder name (so
    ``self.resize`` etc. could not resolve) and every parameter reused one
    name (a SyntaxError); names are restored from the call sites below.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize one frame to `size` ("shortest_edge" or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            # Keep aspect ratio, scaling the shorter side to the target.
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop one frame to an exact (height, width)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize one frame with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        """Apply the configured transforms to a single frame."""
        # Bug fix: the original check was `do_resize and size is None or resample
        # is None` — `and` binds tighter than `or`, so `resample is None` raised
        # even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a video (or batch of videos) into model-ready pixel values."""
        # Fall back to the processor-level defaults for any unset option.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 511
| 1
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds a small random XGLMConfig plus inputs for the TF XGLM tests.

    NOTE(review): renamed from a placeholder — the test class below already
    references ``TFXGLMModelTester``; method names restored from their call
    sites (``self.get_config`` / ``self.prepare_config_and_inputs``).
    """

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        """Config of the real pretrained checkpoint (for slow tests)."""
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        # Clip token ids to a tiny range so generation tests are cheap.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        """A minimal XGLMConfig driven by this tester's attributes."""
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        # Bug fix: the original collapsed this unpack into a single name, so
        # `input_ids` / `head_mask` below were undefined.
        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test suite for TF XGLM.

    NOTE(review): methods originally all shared one placeholder name, so the
    later definitions shadowed the earlier ones and unittest never discovered
    them; real names restored (``super().test_resize_token_embeddings()``
    pins that method's name). Mixin bases restored from this file's imports.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # NOTE(review): these three flags were anonymized to one shared name;
    # restored as the standard TFModelTesterMixin switches — confirm.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests that run real generation with facebook/xglm-564M.

    NOTE(review): all three tests originally shared one placeholder method
    name, so only the last survived and none started with ``test_``; names
    restored so unittest discovers them.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        # Greedy decoding so the expected ids above are deterministic.
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 66
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Time Series Transformer package.
# Bug fix: `_import_structure` was referenced by `_LazyModule(...)` below but
# never defined (the dict and its update were bound to throwaway names),
# which raised NameError at import time.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy (why `import sys` exists above).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 244
| 0
|
def depth_first_search(grid, row, col, visit) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of *grid*.

    Cells are 0 (free) or 1 (blocked); moves are 4-directional and a cell is
    never revisited within one path. *visit* is the set of cells on the
    current path (pass ``set()`` at the top-level call).

    Bug fix: the original declared all four parameters with the same
    placeholder name (a SyntaxError) and recursed on ``depth_first_search``
    while the function itself was named differently (NameError); the
    definition is renamed to match its own recursive call sites.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 306
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
_lowerCAmelCase = logging.get_logger(__name__)
# Maps checkpoint names to the URLs of their config files on the Hub.
# NOTE(review): this rebinding reuses the same name as the logger above and
# discards it — looks like an artifact of automated renaming; confirm intent.
_lowerCAmelCase = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the Swin Transformer model and its backbone use.

    NOTE(review): class renamed — the original shared a placeholder name with
    the ONNX config below, which shadowed it; every ``__init__`` argument
    also reused one name (a SyntaxError). Bases restored from this file's
    imports (``get_aligned_output_features_output_indices`` requires the
    backbone mixin).
    """

    # Registry key read by the `PretrainedConfig` machinery.
    model_type = "swin"

    # Expose the standard transformer attribute names as aliases.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    """ONNX export configuration for Swin.

    NOTE(review): the two properties originally shared one placeholder name,
    so the second shadowed the first; names restored to the standard
    ``OnnxConfig`` hooks, and the base restored from this file's imports.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        # Dynamic-axis spec for the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs.
        return 1E-4
| 306
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds a small random BertConfig plus inputs for the Flax BERT tests.

    NOTE(review): class renamed — the test class below already references
    ``FlaxBertModelTester``; the original ``__init__`` declared every argument
    with one placeholder name (a SyntaxError), and all three prepare methods
    shared one name (names restored from their call sites).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common-model-test suite for Flax BERT.

    NOTE(review): both methods originally shared one placeholder name, so
    ``setUp`` never ran and the slow test was never discovered; the two class
    attributes likewise shared a name, so the first (``True`` flag) was lost.
    Mixin base restored from this file's imports.
    """

    # NOTE(review): restored as the standard mixin switch — confirm the flag name.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 57
|
import re
import string
import numpy as np
import datasets
__UpperCamelCase = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__UpperCamelCase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__UpperCamelCase = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference.

    NOTE(review): method names restored to the ``datasets.Metric`` hooks
    (``_info`` / ``_compute``) — the originals shared one placeholder name,
    so the second definition shadowed the first; ``_compute`` also declared
    every parameter with one name (a SyntaxError).
    """

    def _info(self):
        # Metric metadata consumed by the `datasets` library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip user-supplied regex matches before the other normalizations.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 551
| 0
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Registry of downloadable dance-diffusion checkpoints: model name -> download
# URL plus the audio sample rate (Hz) and sample size (frames) used at training.
A = {
    '''gwf-440k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
        '''sample_rate''': 48_000,
        '''sample_size''': 65_536,
    },
    '''jmann-small-190k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
        '''sample_rate''': 48_000,
        '''sample_size''': 65_536,
    },
    '''jmann-large-580k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
        '''sample_rate''': 48_000,
        '''sample_size''': 131_072,
    },
    '''maestro-uncond-150k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
        '''sample_rate''': 16_000,
        '''sample_size''': 65_536,
    },
    '''unlocked-uncond-250k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
        '''sample_rate''': 16_000,
        '''sample_size''': 65_536,
    },
    '''honk-140k''': {
        '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
        '''sample_rate''': 16_000,
        '''sample_size''': 65_536,
    },
}
def lowercase_ ( lowercase__ , lowercase__ ) ->List[Any]:
return torch.atana(lowercase__ , lowercase__ ) / math.pi * 2
def lowercase_ ( lowercase__ ) ->Any:
_snake_case: int = torch.sin(t * math.pi / 2 ) ** 2
_snake_case: List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase__ , lowercase__ )
class lowerCamelCase ( UpperCamelCase__ ):
    # Empty namespace class used below as a bare attribute container for the
    # converted model's config (sample_size / sample_rate are set on it).
    # NOTE(review): the base `UpperCamelCase__` is not defined in this module
    # (name mangling; presumably `object` originally) — verify.
    pass
class lowerCamelCase ( nn.Module ):
    """Wrapper module holding the diffusion attention U-Net, an EMA copy of it,
    and a Sobol quasi-random engine (the layout the original checkpoints use)."""

    def __init__( self : Tuple , __snake_case : Dict ):
        '''Build the 4-attention-layer U-Net, deep-copy it for EMA, seed the RNG.'''
        super().__init__()
        # NOTE(review): all three results bind the mangled name `_snake_case`,
        # yet `self.diffusion` is read on the next line and `main` later reads
        # `orig_model.diffusion_ema` — the original attribute assignments
        # (`self.diffusion`, `self.diffusion_ema`, `self.rng`) were destroyed
        # by obfuscation. Flagged, not fixed here.
        _snake_case: Dict = DiffusionAttnUnetaD(__snake_case , n_attn_layers=4 )
        _snake_case: int = deepcopy(self.diffusion )
        _snake_case: Dict = torch.quasirandom.SobolEngine(1 , scramble=__snake_case )
def lowercase_(model_name) ->List[str]:
    """Download the pretrained checkpoint for `model_name` into the CWD.

    Args:
        model_name: key into MODELS_MAP (e.g. ``"gwf-440k"``).

    Returns:
        Path of the downloaded checkpoint, ``./{model_name}.ckpt``.
    """
    # Local import keeps the module's top-level dependencies unchanged.
    import urllib.request

    url = MODELS_MAP[model_name]['url']
    # NOTE(review): replaced `os.system(f"wget {url} ./")` — which shelled out,
    # ignored failures, and was injectable via the URL — with a direct stdlib
    # download to the exact path this function returns.
    local_path = F'''./{model_name}.ckpt'''
    urllib.request.urlretrieve(url, local_path)
    return local_path
# Layer-index translation tables: checkpoint block number -> diffusers
# sub-module name, used by `rename` below.
# NOTE(review): all five dicts bind the same mangled name `A`, so each
# assignment clobbers the previous one; originally these were
# DOWN_NUM_TO_LAYER / MID_NUM_TO_LAYER / UP_NUM_TO_LAYER / DEPTH_0_TO_LAYER /
# RES_CONV_MAP / ATTN_MAP (which `rename` still reads). Verify before running.

# Down-block interiors (numbers 1-6 alternate resnet/attention).
A = {
    '''1''': '''resnets.0''',
    '''2''': '''attentions.0''',
    '''3''': '''resnets.1''',
    '''4''': '''attentions.1''',
    '''5''': '''resnets.2''',
    '''6''': '''attentions.2''',
}
# Mid-block interiors (numbers 8-13).
A = {
    '''8''': '''resnets.0''',
    '''9''': '''attentions.0''',
    '''10''': '''resnets.1''',
    '''11''': '''attentions.1''',
    '''12''': '''resnets.2''',
    '''13''': '''attentions.2''',
}
# Up-block interiors (1-6 going down, 8-13 going up).
A = {
    '''1''': '''resnets.0''',
    '''2''': '''attentions.0''',
    '''3''': '''resnets.1''',
    '''4''': '''attentions.1''',
    '''5''': '''resnets.2''',
    '''6''': '''attentions.2''',
    '''8''': '''resnets.3''',
    '''9''': '''attentions.3''',
    '''10''': '''resnets.4''',
    '''11''': '''attentions.4''',
    '''12''': '''resnets.5''',
    '''13''': '''attentions.5''',
}
# Depth-0 (outermost) blocks: 0-2 map to the first down block, 4-6 to the last
# up block.
A = {
    '''0''': '''resnets.0''',
    '''1''': '''resnets.1''',
    '''2''': '''resnets.2''',
    '''4''': '''resnets.0''',
    '''5''': '''resnets.1''',
    '''6''': '''resnets.2''',
}
# Residual conv block: checkpoint sub-layer -> diffusers attribute name.
A = {
    '''skip''': '''conv_skip''',
    '''main.0''': '''conv_1''',
    '''main.1''': '''group_norm_1''',
    '''main.3''': '''conv_2''',
    '''main.4''': '''group_norm_2''',
}
# Attention block: a fused qkv projection fans out into three diffusers keys.
A = {
    '''norm''': '''group_norm''',
    '''qkv_proj''': ['''query''', '''key''', '''value'''],
    '''out_proj''': ['''proj_attn'''],
}
def lowercase_(name) ->int:
    """Translate a residual-conv sub-layer name into the diffusers name.

    Args:
        name: checkpoint key suffix, either ``skip...`` or ``main.{digit}...``.

    Returns:
        The suffix with its prefix replaced per RES_CONV_MAP.

    Raises:
        ValueError: if `name` matches neither expected prefix.
    """
    # NOTE(review): original body read the undefined local `name` (the
    # parameter was mangled); only the parameter name is restored here.
    if name.startswith('skip' ):
        return name.replace('skip' , RES_CONV_MAP['skip'] )
    # name has to be of format main.{digit}
    if not name.startswith('main.' ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    # "main.X" is exactly 6 characters, so name[:6] is the map key.
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowercase_(name) ->Dict:
    """Translate an attention sub-layer name into diffusers naming.

    A scalar mapping (e.g. ``norm`` -> ``group_norm``) returns one string; a
    list mapping (the fused ``qkv_proj``) returns one renamed key per target.

    Args:
        name: checkpoint key suffix inside an attention block.

    Returns:
        A renamed string, or a list of renamed strings for fused projections.

    Raises:
        ValueError: if no ATTN_MAP prefix matches.
    """
    # NOTE(review): the original read undefined `name` and called
    # `name.replace(<param>, <param>)`; restored the evident key/value intent.
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def lowercase_(input_string, max_depth=13) ->List[Any]:
    """Rename one original checkpoint key into diffusers naming.

    Walks the ``net.``/``main.7.`` nesting to count the U-Net depth, picks the
    matching block prefix (down/mid/up), translates the layer number through
    the *_NUM_TO_LAYER tables, and delegates resnet/attention sub-layer names
    to the dedicated converters.

    Args:
        input_string: original checkpoint key.
        max_depth: nesting depth of the mid block (13 for these checkpoints).

    Returns:
        The renamed key, or a list of keys when an attention qkv fans out.

    Raises:
        ValueError: if the remainder does not start with ``.`` as expected.

    NOTE(review): the original had duplicate parameter names (a SyntaxError)
    and every local bound the mangled `_snake_case`; this is a reconstruction
    of the evident original logic — verify against the upstream conversion
    script. It still calls the map tables and the two converters by their
    original names, which this module's obfuscation also renamed.
    """
    string = input_string
    if string.split('.' )[0] == "timestep_embed":
        return string.replace('timestep_embed' , 'time_proj' )
    depth = 0
    if string.startswith('net.3.' ):
        depth += 1
        string = string[6:]
    elif string.startswith('net.' ):
        string = string[4:]
    while string.startswith('main.7.' ):
        depth += 1
        string = string[7:]
    if string.startswith('main.' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else 'down_blocks.0'
    if not string_left.startswith('.' ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left )
    if not isinstance(string_left , list ):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def lowercase_(state_dict) ->Optional[Any]:
    """Rename every key of an original state dict into diffusers naming.

    ``kernel`` entries (up/downsample layers) carry no trainable weights and
    are dropped. Keys whose rename fans out into a list (fused qkv) are split
    by `transform_conv_attns`; everything else is copied under its new name.

    Args:
        state_dict: original checkpoint state dict.

    Returns:
        A new dict keyed by diffusers names.

    NOTE(review): reconstruction — the original bound every local to the
    mangled `_snake_case` and called helpers by names this module's
    obfuscation also renamed. Verify against the upstream conversion script.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def lowercase_(new_state_dict, new_k, v):
    """Copy an attention Conv1d weight `v` into `new_state_dict`, converting
    Conv layout to Linear and splitting fused qkv projections.

    Args:
        new_state_dict: dict being built; mutated and returned.
        new_k: list of one target key (plain projection) or three (q, k, v).
        v: the source tensor; rank-3 tensors are conv weights ``(out, in, 1)``,
           rank-1 tensors are biases.

    Returns:
        `new_state_dict`, with one or three new entries.
    """
    # NOTE(review): the original had three identically-named parameters (a
    # SyntaxError); names restored from how the body reads them.
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight: drop the trailing kernel dim, Conv1d(k=1) -> Linear
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: the first dim holds q, k, v stacked — split in thirds
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def lowercase_ ( lowercase__ ) ->str:
    '''Convert an original dance-diffusion checkpoint into a diffusers
    DanceDiffusionPipeline, sanity-check the converted weights by sampling
    with both implementations, and optionally save the pipeline.

    NOTE(review): every local in this body binds the mangled `_snake_case`
    while later lines read the original names (`model_name`, `config`,
    `diffusers_model`, `orig_model`, `renamed_state_dict`, `steps`, `pipe`,
    `generated`, `audio`, `diff_max`, ...), and it reads the module-level
    `args` from the CLI block below. The original bindings were destroyed by
    obfuscation — flagged, not reconstructed here.
    '''
    # Pick GPU when available; all tensors/models are moved to this device.
    _snake_case: Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    # Model name is the checkpoint file's basename without extension.
    _snake_case: Any = args.model_path.split('/' )[-1].split('.' )[0]
    if not os.path.isfile(args.model_path ):
        # Not a local file: only official names are accepted, then downloaded.
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        _snake_case: Optional[int] = download(lowercase__ )
    _snake_case: Optional[int] = MODELS_MAP[model_name]['sample_rate']
    _snake_case: List[str] = MODELS_MAP[model_name]['sample_size']
    # Bare config object carrying sample_size / sample_rate for the new model.
    _snake_case: str = Object()
    _snake_case: Any = sample_size
    _snake_case: Dict = sample_rate
    _snake_case: List[Any] = 0
    _snake_case: str = UNetaDModel(sample_size=lowercase__ , sample_rate=lowercase__ )
    _snake_case: str = diffusers_model.state_dict()
    # Load the original model and take its EMA weights for conversion.
    _snake_case: str = DiffusionUncond(lowercase__ )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=lowercase__ )['state_dict'] )
    _snake_case: Dict = orig_model.diffusion_ema.eval()
    _snake_case: int = orig_model.state_dict()
    _snake_case: str = rename_orig_weights(lowercase__ )
    # Key sets must line up; only untrainable 'kernel' keys may be left over.
    _snake_case: Optional[int] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    _snake_case: Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(lowercase__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith('kernel' ) for k in list(lowercase__ ) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            _snake_case: int = value.squeeze()
        _snake_case: Optional[Any] = value
    diffusers_model.load_state_dict(lowercase__ )
    # Sample the same noise with both implementations and compare.
    _snake_case: Any = 100
    _snake_case: List[str] = 33
    _snake_case: Any = IPNDMScheduler(num_train_timesteps=lowercase__ )
    _snake_case: Optional[Any] = torch.manual_seed(lowercase__ )
    _snake_case: Optional[int] = torch.randn([1, 2, config.sample_size] , generator=lowercase__ ).to(lowercase__ )
    _snake_case: Dict = torch.linspace(1 , 0 , steps + 1 , device=lowercase__ )[:-1]
    _snake_case: Optional[int] = get_crash_schedule(lowercase__ )
    _snake_case: Any = DanceDiffusionPipeline(unet=lowercase__ , scheduler=lowercase__ )
    _snake_case: Dict = torch.manual_seed(33 )
    _snake_case: int = pipe(num_inference_steps=lowercase__ , generator=lowercase__ ).audios
    _snake_case: str = sampling.iplms_sample(lowercase__ , lowercase__ , lowercase__ , {} )
    _snake_case: Any = generated.clamp(-1 , 1 )
    _snake_case: Optional[Any] = (generated - audio).abs().sum()
    _snake_case: Tuple = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('Diff sum' , lowercase__ )
    print('Diff max' , lowercase__ )
    assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    # CLI: convert a dance-diffusion checkpoint into the diffusers format.
    # NOTE(review): the parser and parsed args both bind the mangled name `A`,
    # yet the calls below read `parser`, `args`, and `main` — bindings were
    # destroyed by obfuscation; verify before running.
    A = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    A = parser.parse_args()
    main(args)
| 717
|
'''Constants injected into auto-generated documentation notebooks (Korean docs):
a pip-install code cell plus per-notebook placeholder substitutions.'''
# NOTE(review): both the install string and the cell list bind the mangled
# name `A`, and the cell list reads `INSTALL_CONTENT`, which that mangling
# removed — verify the original names before use.
A : List[str] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
# Placeholder class names substituted into doc templates.
A : Dict = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 273
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( number ):
    """Return True iff `number` is a perfect square.

    Args:
        number: a non-negative integer.

    Returns:
        True when an integer sq exists with sq * sq == number.
    """
    # Local import: `isqrt` is exact for arbitrarily large ints, whereas the
    # original `int(number**0.5)` loses precision past ~2**52.
    from math import isqrt

    # NOTE(review): the original body read the undefined local `number` (its
    # parameter was mangled to `A__`); the parameter name is restored here.
    sq = isqrt(int(number ) )
    return number == sq * sq
def a__ ( x_num, x_den, y_num, y_den, z_num, z_den ):
    """Add the three fractions x + y + z and return the reduced result.

    Args:
        x_num, x_den: numerator/denominator of x.
        y_num, y_den: numerator/denominator of y.
        z_num, z_den: numerator/denominator of z.

    Returns:
        (top, bottom): the sum as a fraction in lowest terms.
    """
    # NOTE(review): the original declared six identically-named parameters
    # (a SyntaxError); names restored from how the body reads them.
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def a__ ( order = 3_5 ):
    """Sum all distinct reduced fractions x + y + z with 0 < z < 1 obtainable
    from pairs of reduced fractions x, y (denominators up to `order`) under
    the four closure rules n = 1, 2, -1, -2, and return
    ``total.denominator + total.numerator`` of that sum.

    Args:
        order: largest denominator considered (default 35).

    Returns:
        denominator + numerator of the total as a reduced Fraction.

    NOTE(review): the original read the undefined local `order` (its parameter
    was mangled) and called `is_sq` / `add_three`, which this module's
    obfuscation renamed away; both helpers are embedded here so the function
    is self-contained.
    """

    def _is_sq(number ):
        # Perfect-square test (values here are small enough for float sqrt).
        sq = int(number**0.5 )
        return number == sq * sq

    def _add_three(x_num, x_den, y_num, y_den, z_num, z_den ):
        # x + y + z as a reduced (top, bottom) pair.
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top, bottom )
        return top // hcf, bottom // hcf

    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1, order + 1 ):
        for x_den in range(x_num + 1, order + 1 ):
            for y_num in range(1, order + 1 ):
                for y_den in range(y_num + 1, order + 1 ):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=2: z^2 = x^2 + y^2 (keep only perfect-square results)
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
                    # n=-2: 1/z^2 = 1/x^2 + 1/y^2 (perfect squares only)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if _is_sq(z_num ) and _is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den ) )
    for num, den in unique_s:
        total += Fraction(num, den )
    return total.denominator + total.numerator
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this module
    # (the function above was name-mangled) — verify before running.
    print(F"""{solution() = }""")
| 101
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the checkpoint -> config.json URL map for SwinV2.
# NOTE(review): both values bind the same mangled name `UpperCAmelCase`, so
# the map clobbers the logger binding — originally distinct names; verify.
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
    """microsoft/swinv2-tiny-patch4-window8-256""": (
        """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
    ),
}
class UpperCAmelCase_ ( _lowercase):
    """Configuration class for a Swin Transformer V2 model: stores image/patch
    geometry, per-stage depths and head counts, window size, and the usual
    dropout / initialization hyper-parameters.

    NOTE(review): both class attributes bind the mangled name `snake_case__`,
    so the alias map clobbers the model-type string — originally `model_type`
    and `attribute_map`; verify.
    """

    snake_case__ = '''swinv2'''
    # Legacy attribute names accepted by the config machinery.
    snake_case__ = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self : str , __UpperCamelCase : List[str]=224 , __UpperCamelCase : Any=4 , __UpperCamelCase : int=3 , __UpperCamelCase : Tuple=96 , __UpperCamelCase : Union[str, Any]=[2, 2, 6, 2] , __UpperCamelCase : List[Any]=[3, 6, 12, 24] , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : List[str]=4.0 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : int=False , __UpperCamelCase : Tuple=0.0_2 , __UpperCamelCase : Any=1E-5 , __UpperCamelCase : Optional[Any]=32 , **__UpperCamelCase : Any , ) -> List[Any]:
        '''Store every hyper-parameter on the instance; extra kwargs go to the
        base PretrainedConfig. Defaults describe swinv2-tiny at 224px.'''
        super().__init__(**__UpperCamelCase )
        _UpperCamelCase = image_size
        _UpperCamelCase = patch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = embed_dim
        _UpperCamelCase = depths
        _UpperCamelCase = len(__UpperCamelCase )
        _UpperCamelCase = num_heads
        _UpperCamelCase = window_size
        _UpperCamelCase = mlp_ratio
        _UpperCamelCase = qkv_bias
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = drop_path_rate
        _UpperCamelCase = hidden_act
        _UpperCamelCase = use_absolute_embeddings
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = initializer_range
        _UpperCamelCase = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        _UpperCamelCase = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
        _UpperCamelCase = (0, 0, 0, 0)
| 420
| 0
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowercase__ ( ) -> None:
    """Kruskal's MST on a known 9-node weighted graph must yield the known
    minimum spanning tree (order-insensitive comparison).

    NOTE(review): the original bound every local to the mangled
    `_UpperCamelCase` and passed the undefined `lowerCAmelCase__` to
    `kruskal`; restored the evident intent (node count + edge list in,
    MST edge list out).
    """
    num_nodes = 9
    # Each edge is [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # The MST is unique here; only the edge order may differ.
    assert sorted(expected ) == sorted(result )
| 714
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( t_limit = 1_000_000 ,n_limit = 10 ) -> int:
    """Count hollow square laminae tile-counts t <= t_limit that can be formed
    in between 1 and `n_limit` distinct ways (Project-Euler-174 style).

    A lamina with outer width `o` and hole width `h` (same parity, h <= o-2)
    uses o*o - h*h tiles; `count[t]` accumulates how many (o, h) pairs give t.

    Args:
        t_limit: maximum number of tiles per lamina.
        n_limit: count a tile total when it arises in 1..n_limit ways.

    Returns:
        Number of tile totals with between 1 and n_limit representations.
    """
    # NOTE(review): the original declared two identically-named parameters
    # (a SyntaxError) and built `defaultdict(<param>)`; restored to
    # `defaultdict(int)` and distinct parameter names, generalizing the
    # hard-coded upper bound 10 to `n_limit` (default preserved).
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 ,(t_limit // 4) + 2 ):
        # Smallest hole that keeps the tile count within t_limit.
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
        else:
            hole_width_lower_bound = 1
        # Hole width must share the outer width's parity.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound ,outer_width - 1 ,2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
'''Convert a decimal integer to its representation in an arbitrary base (2-36).'''
from string import ascii_uppercase

# Map "10".."35" -> "A".."Z": letter digits for bases above 10
# (ord('A') - 55 == 10).
_lowercase : Dict = {str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCamelCase__ ( num : int , base : int ):
    """Convert non-negative integer `num` to a string in `base` (2-36).

    Digits above 9 use the letters from ALPHABET_VALUES ("A".."Z").

    Args:
        num: non-negative integer to convert.
        base: target base, 2 <= base <= 36.

    Returns:
        The base-`base` representation of `num` as a string.

    Raises:
        TypeError: if `num` or `base` is a float, or `base` is a str.
        ValueError: if `num` is negative or `base` is outside 2..36.
    """
    # NOTE(review): the original declared two parameters both named `A` (a
    # SyntaxError) and its guards collapsed to `isinstance(A, A)`; the checks
    # below restore the evident intent matched to each error message.
    if isinstance(num , float ):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if isinstance(base , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ''''''
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            # Letter digit for values 10..35.
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Round-trip check: converting to any base and back must be lossless.
    # NOTE(review): `decimal_to_any` is not defined under that name in this
    # module (the function above was name-mangled) — verify before running.
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
| 210
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
    """Tokenizer test-suite for MobileBERT (slow and fast tokenizers).

    NOTE(review): this block is machine-mangled — every test method is named
    `a__` (each shadows the previous, so only the last would run), the class
    attributes all bind `__magic_name__` (clobbering each other; originally
    tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    space_between_special_tokens / from_pretrained_filter /
    pre_trained_model_path), locals bind `UpperCAmelCase` while later lines
    read the original names, and many call arguments were replaced by
    `lowerCAmelCase`. Documented as intended behavior; not fixed here.
    """

    __magic_name__ : List[Any] = MobileBertTokenizer
    __magic_name__ : str = MobileBertTokenizerFast
    __magic_name__ : Optional[int] = True
    __magic_name__ : List[Any] = True
    __magic_name__ : Dict = filter_non_english
    __magic_name__ : str = "google/mobilebert-uncased"

    def a__( self : Dict )-> Any:
        """Write a tiny WordPiece vocab to the temp dir and pin the pretrained path."""
        super().setUp()
        UpperCAmelCase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        UpperCAmelCase = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def a__( self : Any , lowerCAmelCase : Tuple )-> List[Any]:
        """Return a (raw input, expected decoded output) text pair."""
        UpperCAmelCase = '''UNwant\u00E9d,running'''
        UpperCAmelCase = '''unwanted, running'''
        return input_text, output_text

    def a__( self : List[str] )-> List[str]:
        """Slow tokenizer: check produced tokens and their vocab ids."""
        UpperCAmelCase = self.tokenizer_class(self.vocab_file )
        UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )

    def a__( self : str )-> str:
        """Slow and fast tokenizers must agree, with and without lower-casing."""
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = '''UNwant\u00E9d,running'''
        UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        # With lower casing
        UpperCAmelCase = self.get_tokenizer(do_lower_case=lowerCAmelCase )
        UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase )
        UpperCAmelCase = '''UNwant\u00E9d,running'''
        UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
        UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

    def a__( self : str )-> int:
        """Default BasicTokenizer splits CJK characters into single tokens."""
        UpperCAmelCase = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def a__( self : Dict )-> Optional[Any]:
        """Lower-casing on: text is folded and accents stripped by default."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def a__( self : str )-> List[str]:
        """Lower-casing on, strip_accents=False keeps diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def a__( self : Dict )-> Dict:
        """Lower-casing on, strip_accents=True removes diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def a__( self : Tuple )-> Union[str, Any]:
        """Lower-casing on with default accent handling strips diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def a__( self : Tuple )-> int:
        """Lower-casing off: original casing is preserved."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def a__( self : str )-> Union[str, Any]:
        """Lower-casing off, strip_accents=False keeps casing and diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def a__( self : int )-> Any:
        """Lower-casing off, strip_accents=True removes only diacritics."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def a__( self : List[Any] )-> Optional[Any]:
        """Tokens listed in never_split must come through untouched."""
        UpperCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def a__( self : Any )-> int:
        """WordpieceTokenizer greedily matches sub-tokens; unknown pieces -> [UNK]."""
        UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        UpperCAmelCase = {}
        for i, token in enumerate(lowerCAmelCase ):
            UpperCAmelCase = i
        UpperCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def a__( self : Union[str, Any] )-> Optional[Any]:
        """_is_whitespace classifies spaces/tabs/newlines/nbsp but not letters."""
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def a__( self : Optional[int] )-> int:
        """_is_control flags control chars but not printable/whitespace chars."""
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def a__( self : Union[str, Any] )-> Dict:
        """_is_punctuation flags ASCII punctuation but not letters/spaces."""
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def a__( self : int )-> Optional[Any]:
        """Soft-hyphen-only strings tokenize to nothing (tokenizers#340 regression)."""
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def a__( self : Optional[Any] )-> List[str]:
        """build_inputs_with_special_tokens wraps sequences in [CLS]/[SEP]."""
        UpperCAmelCase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
        UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def a__( self : Any )-> Dict:
        """Fast tokenizer offset mappings must align with the produced tokens,
        both with and without lower-casing."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                UpperCAmelCase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                UpperCAmelCase = tokenizer_r.encode_plus(
                    lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase , )
                UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase , '''do_lower_case''' ) else False
                UpperCAmelCase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def a__( self : List[str] )-> Any:
        """Chinese characters: '##' continuation prefixes appear only when
        tokenize_chinese_chars is disabled."""
        UpperCAmelCase = ['''的''', '''人''', '''有''']
        UpperCAmelCase = ''''''.join(lowerCAmelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase = True
                UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
                UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                UpperCAmelCase = False
                UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                UpperCAmelCase = tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                UpperCAmelCase = tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
                UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase )
                ]
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
| 210
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds tiny RoFormer configs/inputs and checks every TF model head.

    Restored from obfuscation: the class is instantiated as
    ``TFRoFormerModelTester(self)`` by the test class below, the constructor
    previously declared every parameter with the same name (a SyntaxError) and
    ignored them all, and every method shared one name while callers use the
    canonical ``prepare_config_and_inputs`` / ``create_and_check_*`` names.
    The parameter defaults equal the previously hard-coded values, so behavior
    for existing callers is unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a small config plus random ids, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model with list inputs and bare ids; check output shape."""
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Causal-LM head: logits must cover the full vocabulary per position."""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Masked-LM head shape check."""
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head shape check."""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs are tiled across the choice dimension."""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head shape check."""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: separate start/end logit tensors, one score per position."""
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) form
        expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF tests for RoFormer.

    Restored from obfuscation: the mixin base classes were undefined names,
    all class attributes shared one name, the skip-hook declared five
    parameters with the same name (a SyntaxError), and the test methods need
    their ``test_*`` names for pytest/unittest to collect them.
    """

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The text-generation pipeline tests are skipped wholesale for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration test: exact logits of the pretrained Chinese checkpoint.

    Restored from obfuscation: locals (``model``, ``output``, ``vocab_size``,
    ``expected_slice``, ...) were all assigned to one placeholder name.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal position embedding values.

    Restored from obfuscation: the class attribute was unnamed while the
    methods read ``self.tolerance``.
    """

    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        # First half of each row is sin, second half is cos; position 0 gives
        # sin=0 / cos=1.
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        # Reference values from the original RoFormer implementation.
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks `apply_rotary_position_embeddings` against reference values.

    Restored from obfuscation: the rotary call returns two tensors
    (query/key) but was assigned to a single placeholder, and ``tf.floataa``
    is not a TF dtype (``tf.float32``).
    """

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # Deterministic ramp inputs; key is the negated query.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 717
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__A =pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def a(path, tmp_path):
    """Check that `inspect_dataset` copies the dataset script into `tmp_path`.

    Fixed: the original declared both parameters with the same placeholder
    name (a SyntaxError), and `@parametrize("path", ...)` / the `tmp_path`
    fixture require these exact parameter names.
    NOTE(review): pytest only collects `test_*`-prefixed functions; the `a`
    functions in this module shadow each other and should be renamed.
    """
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    # Only the script itself is copied, never compiled caches.
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def a(path, tmp_path):
    """Check that the deprecated `inspect_metric` copies the metric script.

    Fixed: duplicate placeholder parameter names (SyntaxError); parametrize
    and the `tmp_path` fixture require these exact names.
    """
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def a(path, config_name, expected_splits):
    """`get_dataset_config_info` returns the requested config with its splits.

    Fixed: duplicate placeholder parameter names (SyntaxError); parametrize
    requires these exact names, and the local `info` was never bound.
    """
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def a(path, config_name, expected_exception):
    """Requesting info without a config name on a multi-config dataset raises.

    Fixed: duplicate placeholder parameter names (SyntaxError).
    """
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def a(path, expected):
    """`get_dataset_config_names` includes the expected config for each path.

    Fixed: duplicate placeholder parameter names (SyntaxError) and the unbound
    local `config_names`.
    """
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def a(path, expected_configs, expected_splits_in_first_config):
    """`get_dataset_infos` lists every config; the first one exposes its splits.

    Fixed: duplicate placeholder parameter names (SyntaxError) and unbound
    locals (`infos`, `expected_config`, `info`).
    """
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def a(path, expected_config, expected_splits):
    """A specific config in `get_dataset_infos` carries its name and splits.

    Fixed: duplicate placeholder parameter names (SyntaxError) and unbound
    locals (`infos`, `info`).
    """
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def a(path, config_name, expected_exception):
    """Requesting split names without a config on a multi-config dataset raises.

    Fixed: duplicate placeholder parameter names (SyntaxError).
    """
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 241
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility holding learned (or absent) text embeddings for classifier-free
    sampling.

    Restored from obfuscation: the pipeline below references this class by
    this name in its annotations and reads `.learnable` / `.embeddings`, but
    the original assignments lost the `self.` prefix and local names, and the
    base classes were undefined placeholders.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            # One zero-initialized embedding vector per token position.
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        # Parameter(None) yields an empty parameter when not learnable.
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation with VQ Diffusion.

    Restored from obfuscation: local variable names (`text_input_ids`,
    `prompt_embeds`, `latents`, ...) and the method names `_encode_prompt` /
    `truncate` were collapsed to placeholders, and the `rv[~keep_mask]`
    indexing in `truncate` was lost.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: TransformeraDModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` into (optionally guidance-doubled) CLIP embeddings."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        """Run the VQ-Diffusion denoising loop and decode latents to images."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # All pixels start as the "masked" class (the last embedding index).
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Re-normalize the log-probabilities after guidance mixing.
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float):
        """Zero out (in log-space: set to -inf) the lowest-probability classes
        whose cumulative probability exceeds `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original class axis.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
| 582
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# All four module-level constants were obfuscated to the same name while
# L6626-6627 below already reference `DIFFUSERS_PATH`; canonical names restored.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    """Whether `line` still belongs to the current block: it is indented at
    least as deep as the block, is (near-)empty, or is a closing-paren line of
    a multi-line signature (e.g. `) -> Foo:`).

    Renamed from the obfuscated `__lowercase`: call sites in this file use
    `_should_continue`.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Return the source code of `object_name` (a dotted path like
    `models.unet_2d.UNet2DModel.forward`) found under `DIFFUSERS_PATH`.

    Restored from obfuscation: the function is called as
    `find_code_in_diffusers` later in this file, and the module/path locals
    had collapsed to a single placeholder name.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step.
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# The three compiled patterns were obfuscated to one shared name while code
# below already references `_re_copy_warning`; canonical names restored.
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    """Return the leading indentation string of the first non-empty line of
    `code`, or "" if there is none.

    Renamed from the obfuscated `__lowercase`: call sites in this file use
    `get_indent`. Also fixed: a first line made only of whitespace matched no
    `\\S` and crashed on `None.groups()`; it now yields "".
    """
    lines = code.split("\n")
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        match = re.search(r"^(\s*)\S", lines[idx])
        return match.groups()[0] if match is not None else ""
    return ""
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> str:
    """Reformat a code snippet with black, preserving its original indentation.

    Indented snippets are temporarily wrapped in a dummy `class Bla:` so black
    accepts them, then unwrapped before returning.
    Fixes: the obfuscated body assigned every value to `__a` while reading
    `has_indent` / `code` / `mode` / `result`, and referenced the nonexistent
    `black.TargetVersion.PYaa` (restored to `PY37`, the value used upstream).
    NOTE(review): `get_indent` and `style_docstrings_in_code` are defined
    elsewhere in this file (under obfuscated names) — verify they resolve.
    """
    has_indent = len(get_indent(__SCREAMING_SNAKE_CASE)) > 0
    code = __SCREAMING_SNAKE_CASE
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    # Strip the dummy wrapper we added above, if any.
    return result[len("class Bla:\n"):] if has_indent else result
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
    """Check that code marked `# Copied from diffusers....` in *filename* matches its source.

    Returns a list of `[object_name, start_index]` diffs; when *overwrite* is
    True, stale copies are rewritten in place.

    NOTE(review): the obfuscated body binds every result to `__a` while later
    statements read descriptive names (`search`, `lines`, `observed_code`, ...),
    and reads module constants (`_re_copy_warning`, `_re_replace_pattern`) that
    do not exist under those names in this file — presumably the original code
    used those names throughout; verify against upstream before running.
    """
    with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        __a = f.readlines()
    __a = []
    __a = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(__SCREAMING_SNAKE_CASE ):
        __a = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        __a , __a , __a = search.groups()
        __a = find_code_in_diffusers(__SCREAMING_SNAKE_CASE )
        __a = get_indent(__SCREAMING_SNAKE_CASE )
        # Observed copy starts right after the marker (one extra line when the
        # marker is less indented than the code it annotates).
        __a = line_index + 1 if indent == theoretical_indent else line_index + 2
        __a = theoretical_indent
        __a = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        __a = True
        while line_index < len(__SCREAMING_SNAKE_CASE ) and should_continue:
            line_index += 1
            if line_index >= len(__SCREAMING_SNAKE_CASE ):
                break
            __a = lines[line_index]
            __a = _should_continue(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and re.search(F'''^{indent}# End copy''' , __SCREAMING_SNAKE_CASE ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        __a = lines[start_index:line_index]
        __a = """""".join(__SCREAMING_SNAKE_CASE )
        # Remove any nested `Copied from` comments to avoid circular copies
        __a = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(__SCREAMING_SNAKE_CASE ) is None]
        __a = """\n""".join(__SCREAMING_SNAKE_CASE )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(__SCREAMING_SNAKE_CASE ) > 0:
            __a = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            __a = [_re_replace_pattern.search(__SCREAMING_SNAKE_CASE ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                __a , __a , __a = pattern.groups()
                __a = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                if option.strip() == "all-casing":
                    # Also apply the replacement to lower- and upper-cased variants.
                    __a = re.sub(obja.lower() , obja.lower() , __SCREAMING_SNAKE_CASE )
                    __a = re.sub(obja.upper() , obja.upper() , __SCREAMING_SNAKE_CASE )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            __a = blackify(lines[start_index - 1] + theoretical_code )
            __a = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                # Splice the up-to-date copy into the file buffer and re-scan from it.
                __a = lines[:start_index] + [theoretical_code] + lines[line_index:]
                __a = start_index + 1
    if overwrite and len(__SCREAMING_SNAKE_CASE ) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting (unknown).''' )
        with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(__SCREAMING_SNAKE_CASE )
    return diffs
def __lowercase ( __SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
    """Run the copy-consistency check over every `.py` file of the diffusers tree.

    Raises an Exception listing all stale copies unless *overwrite* is True.

    NOTE(review): after obfuscation the boolean flag is passed where a
    `DIFFUSERS_PATH`-style constant was presumably used (`os.path.join` first
    argument, `recursive=` value, and both `is_copy_consistent` arguments), and
    results are bound to `__a` while `all_files` / `diffs` / `overwrite` are
    read — verify against the upstream script before running.
    """
    __a = glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , """**/*.py""" ) , recursive=__SCREAMING_SNAKE_CASE )
    __a = []
    for filename in all_files:
        __a = is_copy_consistent(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        diffs += [F'''- (unknown): copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(__SCREAMING_SNAKE_CASE ) > 0:
        __a = """\n""".join(__SCREAMING_SNAKE_CASE )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites stale copies in place.
    # NOTE(review): the parser and args are bound to `SCREAMING_SNAKE_CASE_` but
    # read as `parser` / `args` — an obfuscation artifact; verify before running.
    SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    SCREAMING_SNAKE_CASE_ = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 582
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE( UpperCamelCase_ , unittest.TestCase ):
    """Fast pipeline tests for `KandinskyVaaPriorPipeline` built from tiny dummy sub-models.

    NOTE(review): every method below shares the obfuscated name
    `__lowerCamelCase`, so later definitions shadow earlier ones, and method
    bodies read attributes (`self.time_input_dim`, `self.dummy_prior`, ...) and
    locals (`tokenizer`, `model`, ...) that presumably matched the original
    property/variable names before obfuscation — verify against the upstream
    diffusers test before running.
    """

    # Pipeline under test and the parameter lists consumed by the shared test mixin.
    A_ : Any = KandinskyVaaPriorPipeline
    A_ : Optional[int] = ['prompt']
    A_ : Tuple = ['prompt', 'negative_prompt']
    A_ : List[str] = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    A_ : Optional[int] = False

    @property
    def __lowerCamelCase ( self : Optional[int] ) -> Dict:
        # text-embedder hidden size used by all dummy sub-models
        return 32

    @property
    def __lowerCamelCase ( self : Dict ) -> Any:
        # time input dimension
        return 32

    @property
    def __lowerCamelCase ( self : Dict ) -> Any:
        return self.time_input_dim

    @property
    def __lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
        return self.time_input_dim * 4

    @property
    def __lowerCamelCase ( self : Any ) -> int:
        # cross-attention dimension
        return 1_00

    @property
    def __lowerCamelCase ( self : str ) -> Dict:
        """Tiny CLIP tokenizer used by the dummy text encoder."""
        SCREAMING_SNAKE_CASE__ :List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def __lowerCamelCase ( self : Any ) -> List[Any]:
        """Deterministic tiny CLIP text encoder (seeded for reproducibility)."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ :Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModelWithProjection(UpperCamelCase_ )

    @property
    def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
        """Deterministic tiny `PriorTransformer`."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ :Tuple = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        SCREAMING_SNAKE_CASE__ :Optional[int] = PriorTransformer(**UpperCamelCase_ )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        SCREAMING_SNAKE_CASE__ :Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def __lowerCamelCase ( self : Tuple ) -> Optional[Any]:
        """Deterministic tiny CLIP vision encoder."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ :int = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        SCREAMING_SNAKE_CASE__ :int = CLIPVisionModelWithProjection(UpperCamelCase_ )
        return model

    @property
    def __lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
        """CLIP image processor matching the vision encoder above."""
        SCREAMING_SNAKE_CASE__ :Optional[int] = CLIPImageProcessor(
            crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
        return image_processor

    def __lowerCamelCase ( self : List[str] ) -> Any:
        """Assemble all dummy components needed to instantiate the pipeline."""
        SCREAMING_SNAKE_CASE__ :List[str] = self.dummy_prior
        SCREAMING_SNAKE_CASE__ :Tuple = self.dummy_image_encoder
        SCREAMING_SNAKE_CASE__ :int = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE__ :Optional[int] = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE__ :Dict = self.dummy_image_processor
        SCREAMING_SNAKE_CASE__ :List[Any] = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=10.0 , )
        SCREAMING_SNAKE_CASE__ :List[Any] = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components

    def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str]=0 ) -> List[str]:
        """Build deterministic pipeline call kwargs for the given device/seed."""
        if str(UpperCamelCase_ ).startswith('mps' ):
            # MPS does not support device-bound generators.
            SCREAMING_SNAKE_CASE__ :List[str] = torch.manual_seed(UpperCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE__ :Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Optional[Any] = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs

    def __lowerCamelCase ( self : str ) -> str:
        """Smoke test: image embeds match a recorded slice, dict and tuple outputs agree."""
        SCREAMING_SNAKE_CASE__ :int = '''cpu'''
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ :Dict = self.pipeline_class(**UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = pipe.to(UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
        SCREAMING_SNAKE_CASE__ :Optional[Any] = output.image_embeds
        SCREAMING_SNAKE_CASE__ :Any = pipe(
            **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
        SCREAMING_SNAKE_CASE__ :Tuple = image[0, -10:]
        SCREAMING_SNAKE_CASE__ :Optional[int] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        SCREAMING_SNAKE_CASE__ :str = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def __lowerCamelCase ( self : Dict ) -> str:
        """Batched single-image inference must match unbatched inference."""
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch_device == '''cpu'''
        SCREAMING_SNAKE_CASE__ :List[str] = True
        SCREAMING_SNAKE_CASE__ :Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , )

    @skip_mps
    def __lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        """Attention slicing must not change the forward-pass result."""
        SCREAMING_SNAKE_CASE__ :str = torch_device == '''cpu'''
        SCREAMING_SNAKE_CASE__ :Dict = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , )
| 719
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
    """Image processor: resize / center-crop / rescale / normalize pipeline plus
    semantic-segmentation post-processing.

    NOTE(review): obfuscation artifacts throughout — the class inherits from its
    own name (upstream presumably `BaseImageProcessor`), every method reuses one
    parameter name `UpperCamelCase_` (duplicate-argument SyntaxError), and
    bodies assign to `SCREAMING_SNAKE_CASE__` while reading descriptive names
    (`size`, `images`, ...). Verify against the upstream implementation before
    running; the code is kept byte-identical here.
    """

    # Keys of the BatchFeature returned by `preprocess`.
    A_ : Tuple = ['pixel_values']

    def __init__( self : Optional[Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Dict[str, int]] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_55 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : str , ) -> None:
        """Store the default transform flags/sizes (shortest-edge 256, 224x224 crop)."""
        super().__init__(**UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = size if size is not None else {'shortest_edge': 2_56}
        SCREAMING_SNAKE_CASE__ :Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        SCREAMING_SNAKE_CASE__ :Dict = get_size_dict(UpperCamelCase_ , param_name='crop_size' )
        SCREAMING_SNAKE_CASE__ :Optional[int] = do_resize
        SCREAMING_SNAKE_CASE__ :List[Any] = size
        SCREAMING_SNAKE_CASE__ :str = resample
        SCREAMING_SNAKE_CASE__ :Dict = do_center_crop
        SCREAMING_SNAKE_CASE__ :List[Any] = crop_size
        SCREAMING_SNAKE_CASE__ :List[str] = do_rescale
        SCREAMING_SNAKE_CASE__ :List[Any] = rescale_factor
        SCREAMING_SNAKE_CASE__ :int = do_normalize
        SCREAMING_SNAKE_CASE__ :Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE__ :Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        SCREAMING_SNAKE_CASE__ :str = get_resize_output_image_size(UpperCamelCase_ , size=size['shortest_edge'] , default_to_square=UpperCamelCase_ )
        return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def __lowerCamelCase ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        SCREAMING_SNAKE_CASE__ :Dict = get_size_dict(UpperCamelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any ) -> np.ndarray:
        """Multiply pixel values by the given scale factor (e.g. 1/255)."""
        return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def __lowerCamelCase ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : int , ) -> np.ndarray:
        """Channel-wise normalize with the given mean and standard deviation."""
        return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )

    def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[float] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Any , ) -> Optional[int]:
        """Run the full transform pipeline; per-call arguments override the instance defaults."""
        SCREAMING_SNAKE_CASE__ :List[str] = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE__ :int = size if size is not None else self.size
        SCREAMING_SNAKE_CASE__ :int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE__ :Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE__ :Dict = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE__ :Optional[int] = get_size_dict(UpperCamelCase_ , param_name='crop_size' )
        SCREAMING_SNAKE_CASE__ :List[str] = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE__ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE__ :Any = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE__ :List[str] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE__ :Optional[int] = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE__ :List[Any] = make_list_of_images(UpperCamelCase_ )
        if not valid_images(UpperCamelCase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # Validate that every enabled transform has the arguments it needs.
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE__ :Optional[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images]
        if do_resize:
            SCREAMING_SNAKE_CASE__ :List[Any] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
        if do_center_crop:
            SCREAMING_SNAKE_CASE__ :Union[str, Any] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE__ :List[str] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE__ :str = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
        SCREAMING_SNAKE_CASE__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
        SCREAMING_SNAKE_CASE__ :Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )

    def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Tuple] = None ) -> str:
        """Turn model logits into per-image segmentation maps, optionally resized to `target_sizes`."""
        SCREAMING_SNAKE_CASE__ :Any = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(UpperCamelCase_ ):
                SCREAMING_SNAKE_CASE__ :List[Any] = target_sizes.numpy()
            SCREAMING_SNAKE_CASE__ :Any = []
            for idx in range(len(UpperCamelCase_ ) ):
                # Bilinearly upsample each logit map to its target size, then argmax over classes.
                SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCamelCase_ )
                SCREAMING_SNAKE_CASE__ :Tuple = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(UpperCamelCase_ )
        else:
            SCREAMING_SNAKE_CASE__ :List[str] = logits.argmax(dim=1 )
            SCREAMING_SNAKE_CASE__ :Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 320
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Example scripts that have no `complete_*` counterpart (or require special
# hardware/configuration) and are therefore excluded from the comparison tests.
__lowerCamelCase = [
    """cross_validation.py""",
    """gradient_accumulation.py""",
    """local_sgd.py""",
    """multi_process_metrics.py""",
    """memory.py""",
    """automatic_gradient_accumulation.py""",
    """fsdp_with_peak_mem_tracking.py""",
    """deepspeed_with_config_support.py""",
    """megatron_lm_gpt_pretraining.py""",
]
class _snake_case ( unittest.TestCase ):
    """Checks that each `examples/by_feature/*.py` script stays in sync with the
    corresponding `examples/complete_*_example.py` script.

    NOTE(review): the helper and both test methods share the obfuscated name
    `lowercase_`; the helper's four parameters are all named `a` (a
    duplicate-argument SyntaxError), and bodies read names (`parser_only`,
    `item_path`, `diff`, `special_strings`, ...) that were presumably bound
    before obfuscation — verify against the upstream accelerate test suite.
    The code below is kept byte-identical.
    """

    def lowercase_ ( self , a , a , a = None , a = None ) -> Any:
        """Diff one complete example against every by_feature script and assert no difference."""
        _A = None
        _A = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        _A = os.path.abspath('''examples''' )
        for item in os.listdir(a ):
            if item not in EXCLUDE_EXAMPLES:
                _A = os.path.join(a , a )
                if os.path.isfile(a ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=a , feature_script=a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        _A = compare_against_test(
                            os.path.join(a , a ) , a , a , a )
                        _A = '''\n'''.join(a )
                        if special_strings is not None:
                            # Strip lines that are expected to differ before asserting equality.
                            for string in special_strings:
                                _A = diff.replace(a , '''''' )
                        self.assertEqual(a , '''''' )

    def lowercase_ ( self ) -> Optional[Any]:
        """NLP complete example: compare both the parser and the training-function sections."""
        self.one_complete_example('''complete_nlp_example.py''' , a )
        self.one_complete_example('''complete_nlp_example.py''' , a )

    def lowercase_ ( self ) -> List[Any]:
        """CV complete example: compare, ignoring tracking-specific lines."""
        _A = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        _A = [
            ''' ''' * 1_6 + '''{\n\n''',
            ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
            ''' ''' * 1_6 + '''},\n\n''',
            ''' ''' * 1_6 + '''step=epoch,\n''',
            ''' ''' * 1_2,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''' , a , a , a )
        self.one_complete_example('''complete_cv_example.py''' , a , a , a )
@mock.patch.dict(os.environ ,{'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class _snake_case ( lowerCamelCase ):
    """End-to-end runs of the feature example scripts through `accelerate launch`
    (with mocked dataloaders), asserting on produced checkpoints and stdout.

    NOTE(review): every test method shares the obfuscated name `lowercase_`, so
    later definitions shadow earlier ones, and bodies read names (`testargs`,
    `output`, `results`, `num_processes`, ...) that were presumably bound before
    obfuscation — verify against the upstream accelerate test suite. The code
    below is kept byte-identical.
    """

    lowerCamelCase_ = False

    @classmethod
    def lowercase_ ( cls ) -> Dict:
        """Create a shared temp dir containing a default accelerate config used by all tests."""
        super().setUpClass()
        _A = tempfile.mkdtemp()
        _A = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        _A = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def lowercase_ ( cls ) -> List[str]:
        """Remove the shared temp dir."""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def lowercase_ ( self ) -> int:
        """`--checkpointing_steps epoch` must create an `epoch_0` checkpoint folder."""
        _A = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )

    def lowercase_ ( self ) -> List[Any]:
        """`--checkpointing_steps 1` must create a `step_2` checkpoint folder."""
        _A = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
        _A = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )

    def lowercase_ ( self ) -> Union[str, Any]:
        """Resuming from an epoch checkpoint skips epoch 0 and runs epoch 1."""
        _A = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
        _A = run_command(self._launch_args + testargs , return_stdout=a )
        self.assertNotIn('''epoch 0:''' , a )
        self.assertIn('''epoch 1:''' , a )

    def lowercase_ ( self ) -> Optional[int]:
        """Resuming from a step checkpoint: which epochs run depends on the process count."""
        _A = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
        _A = run_command(self._launch_args + testargs , return_stdout=a )
        if torch.cuda.is_available():
            _A = torch.cuda.device_count()
        else:
            _A = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , a )
            self.assertIn('''epoch 1:''' , a )
        else:
            self.assertIn('''epoch 0:''' , a )
            self.assertIn('''epoch 1:''' , a )

    @slow
    def lowercase_ ( self ) -> List[str]:
        """Cross-validation example must reach >= 0.75 accuracy (real dataloaders)."""
        _A = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
        with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            _A = run_command(self._launch_args + testargs , return_stdout=a )
            # Pull the last printed metrics dict out of stdout and parse it.
            _A = re.findall('''({.+})''' , a )
            _A = [r for r in results if '''accuracy''' in r][-1]
            _A = ast.literal_eval(a )
            self.assertGreaterEqual(results['''accuracy'''] , 0.75 )

    def lowercase_ ( self ) -> int:
        """Multi-process metrics example runs to completion."""
        _A = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def lowercase_ ( self ) -> List[Any]:
        """Tracking example writes a `tracking` folder into the project dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            _A = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(a , '''tracking''' ) ) )

    def lowercase_ ( self ) -> List[Any]:
        """Gradient-accumulation example runs to completion."""
        _A = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs )

    def lowercase_ ( self ) -> List[Any]:
        """Local SGD example runs to completion."""
        _A = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs )
| 317
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Table Transformer model: the configuration is
# always importable; the torch modeling symbols are registered only when torch
# is available. At type-checking time the real symbols are imported directly;
# at runtime the module is replaced by a `_LazyModule`.
__lowerCamelCase = {
    """configuration_table_transformer""": [
        """TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TableTransformerConfig""",
        """TableTransformerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): both the import-structure dict above and this model list are
    # bound to the same obfuscated name `__lowerCamelCase`; upstream registers
    # the list under `_import_structure["modeling_table_transformer"]` — verify.
    __lowerCamelCase = [
        """TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TableTransformerForObjectDetection""",
        """TableTransformerModel""",
        """TableTransformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    __lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 317
| 1
|
from __future__ import annotations
import math
snake_case__ = '''2020.9.26'''
snake_case__ = '''xcodz-dot, cclaus, dhruvmanila'''
def lowerCamelCase__(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point (x, y, z) to its 2D perspective projection.

    The point is projected onto a plane at the given *distance* from the
    observer and multiplied by *scale*.

    Raises:
        TypeError: if any argument is not an int or float.

    Fix: the obfuscated signature named all five parameters `a` (a
    duplicate-argument SyntaxError) and the validation tested `isinstance(a, ...)`
    instead of each value; distinct names and the per-value check are restored.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def lowerCamelCase__(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the 3D point (x, y, z) about the given *axis* ('x', 'y' or 'z').

    The angle is reduced modulo 360 and scaled by the original (unusual) factor
    `(angle % 360) / 450 * 180 / pi` radians — preserved as-is from the source.

    Raises:
        TypeError: if *axis* is not a string, or any other argument is not numeric.
        ValueError: if *axis* is not one of 'x', 'y', 'z'.

    Fix: the obfuscated signature named all five parameters `a` (a
    duplicate-argument SyntaxError); distinct names are restored.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `convert_to_ad` / `rotate` do not match the (obfuscated)
    # name `lowerCamelCase__` shared by both functions above — presumably the
    # original names before obfuscation; verify before running.
    print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 373
|
import os
import time
import numpy as np
import onnxruntime as ort
# Benchmark a BERT-style ONNX model with onnxruntime: one warm-up run, then
# `max_iters` timed runs, reporting the average latency per inference.
# NOTE(review): obfuscation artifacts — every binding uses the name
# `snake_case__` while later statements read descriptive names (`sess_opt`,
# `sess`, `input_ids`, ...); the three string constants below presumably set
# ORT/CUDA environment variables upstream (as written they are dead bindings);
# and `np.intaa` is presumably a mangled `np.int64`. Verify before running.
snake_case__ = '''1'''
snake_case__ = '''0'''
snake_case__ = '''1'''
snake_case__ = ort.SessionOptions()
# Disable graph optimizations so the raw execution-provider speed is measured.
snake_case__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
snake_case__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
snake_case__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
snake_case__ = ort.RunOptions()
# One batch of 128 all-ones tokens (ids / attention mask / token types).
snake_case__ = 128
snake_case__ = 1
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
snake_case__ = time.time()
snake_case__ = 2000
snake_case__ = {}
for iter in range(max_iters):
    snake_case__ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
# Average wall-clock latency over all timed iterations, in milliseconds.
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 373
| 1
|
"""simple docstring"""
from string import ascii_uppercase
# Forward map: uppercase letter -> index (0-25).
UpperCAmelCase ={char: i for i, char in enumerate(ascii_uppercase)}
# Reverse map: index (0-25) -> uppercase letter.
# NOTE(review): both constants are bound to the same obfuscated name
# `UpperCAmelCase`, so this second assignment shadows the first; the cipher
# functions below read a name `dicta` matching neither — verify against upstream.
UpperCAmelCase =dict(enumerate(ascii_uppercase))
def _A(message: str, key: str) -> str:
    """Repeat *key* cyclically until it is as long as *message* and return it.

    Assumes the key is no longer than the message (a longer key never reaches
    the length-equality exit and would loop forever — preserved from the
    original). Fix: the obfuscated signature named both parameters `_a` (a
    duplicate-argument SyntaxError); distinct names are restored.
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            # wrap the cycling index once it reaches the message length
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def _A(message: str, key_new: str) -> str:
    """Encrypt *message* with a key already expanded to the message length.

    Spaces pass through unchanged; other characters are shifted by
    `(index(letter) - index(key letter)) % 26`.

    NOTE(review): `dicta` is read for both the forward (char -> index) and
    reverse (index -> char) lookups, and does not resolve under the obfuscated
    module constants above — upstream used two distinct tables; verify.
    Fix: the obfuscated signature named both parameters `_a` (a
    duplicate-argument SyntaxError); distinct names are restored.
    """
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dicta[x]
    return encrypted
def _A(cipher_text: str, key_new: str) -> str:
    """Decrypt *cipher_text* with a key already expanded to the text length.

    Inverse of the encryption above: spaces pass through; other characters are
    shifted by `(index(letter) + index(key letter) + 26) % 26`.

    NOTE(review): `dicta` is read for both the forward and reverse lookups and
    does not resolve under the obfuscated module constants above — upstream
    used two distinct tables; verify. Fix: the obfuscated signature named both
    parameters `_a` (a duplicate-argument SyntaxError); distinct names restored.
    """
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dicta[x]
    return or_txt
def _A ( ):
    """Demo: encrypt and then decrypt a fixed message with the key "SECRET".

    NOTE(review): `generate_key` / `cipher_text` / `original_text` and the `_a`
    arguments do not resolve under the obfuscated names in this file (the three
    functions above are all named `_A`, results are bound to `A` while `s` is
    read) — verify against the upstream script before running.
    """
    A = """THE GERMAN ATTACK"""
    A = """SECRET"""
    A = generate_key(_a , _a )
    A = cipher_text(_a , _a )
    print(f'Encrypted Text = {s}' )
    print(f'Original Text = {original_text(_a , _a )}' )
if __name__ == "__main__":
    # Run doctests first, then the demo.
    # NOTE(review): `main` does not resolve — the demo function above is
    # (obfuscated to) `_A`; verify before running.
    import doctest

    doctest.testmod()
    main()
| 617
|
"""simple docstring"""
# Capacity matrix of the example flow network (6 nodes, source 0, sink 5):
# entry [u][v] is the capacity of edge u -> v, 0 meaning no edge.
UpperCAmelCase =[
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def _A(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    """Breadth-first search over the residual network from source *s*.

    Fills *parent* in place with the BFS tree (parent[v] = predecessor of v)
    and returns True when the sink *t* is reachable through edges of positive
    residual capacity. Fix: the obfuscated signature named every parameter
    `_a` (a duplicate-argument SyntaxError); distinct names are restored.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def _A(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    """Compute the edges of a minimum s-t cut via Ford-Fulkerson (Edmonds-Karp).

    Mutates *graph* into its final residual network (as the original did) and
    returns the saturated edges (u, v) — zero residual capacity but positive
    original capacity — that cross the cut.

    Fixes: the obfuscated signature named every parameter `_a` (a
    duplicate-argument SyntaxError), and the loop called a `bfs` helper that
    does not exist under that name in this file; the search is inlined as a
    local helper so the function is self-contained.
    """

    def _bfs(parent: list[int]) -> bool:
        # BFS over positive-capacity residual edges; fills `parent` and reports
        # whether `sink` is reachable from `source`.
        visited = [False] * len(graph)
        queue = [source]
        visited[source] = True
        while queue:
            u = queue.pop(0)
            for ind in range(len(graph[u])):
                if not visited[ind] and graph[u][ind] > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return visited[sink]

    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [row[:] for row in graph]  # record the original capacities (copy)
    while _bfs(parent):
        # Find the bottleneck capacity along the augmenting path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Saturated edges that originally had positive capacity form the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    # NOTE(review): `mincut` / `test_graph` do not resolve under the obfuscated
    # names in this file (the function above is `_A`, the matrix `UpperCAmelCase`);
    # verify before running.
    print(mincut(test_graph, source=0, sink=5))
| 617
| 1
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# NOTE(review): the original bound all three values below to the same name
# (`SCREAMING_SNAKE_CASE`), so the first two were dead and the namedtuple
# class used by the conversion routine (`BertAbsConfig`) was left undefined.
logger = logging.getLogger(__name__)
# Sample sentence kept from the original script (includes non-ASCII chars).
SAMPLE_TEXT = 'Hello world! cécé herlolip'
# Hyper-parameter container mirroring the authors' BertAbs configuration.
BertAbsConfig = namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def _lowerCamelCase ( __A : Any , __A : Tuple ) -> Any:
    """Convert an original BertAbs checkpoint to the HuggingFace
    `BertAbsSummarizer` layout, verify both models produce identical outputs,
    and save the converted state dict.

    NOTE(review): both parameters were renamed `__A` (a duplicate-argument
    SyntaxError) and most distinct locals were collapsed into
    `_UpperCAmelCase`, so later reads such as `original`, `new_model`,
    `tokenizer`, `encoder_input_ids`, `decoder_input_ids`,
    `output_original_model`, `output_converted_model`,
    `output_original_generator`, `output_converted_generator` and
    `are_identical` reference names never bound here.  Code is kept
    byte-identical; only comments were added.
    """
    # Fixed BertAbs hyper-parameters used by the authors' checkpoint.
    _UpperCAmelCase : Union[str, Any] = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=__A , large=__A , share_emb=__A , use_bert_emb=__A , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
    # Load onto CPU.  NOTE(review): the map_location lambda reads `storage`,
    # which its mangled parameter list never binds — originally
    # `lambda storage, loc: storage`.
    _UpperCAmelCase : Optional[int] = torch.load(__A , lambda __A , __A : storage )
    _UpperCAmelCase : List[Any] = AbsSummarizer(__A , torch.device('''cpu''' ) , __A )
    original.eval()
    _UpperCAmelCase : List[str] = BertAbsSummarizer(__A , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    # Copy encoder, decoder and generator weights wholesale.
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    _UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs (pad both sequences to length 512)
    _UpperCAmelCase : str = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
    _UpperCAmelCase : int = torch.tensor(__A ).unsqueeze(0 )
    _UpperCAmelCase : Union[str, Any] = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__A )) )
    _UpperCAmelCase : Union[str, Any] = torch.tensor(__A ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    # NOTE(review): the seven locals below originally held the distinct model
    # inputs (src, tgt, segs, clss, mask_src, mask_tgt, mask_cls).
    _UpperCAmelCase : Dict = encoder_input_ids
    _UpperCAmelCase : Tuple = decoder_input_ids
    _UpperCAmelCase : Dict = None
    _UpperCAmelCase : Optional[Any] = None
    _UpperCAmelCase : Union[str, Any] = None
    _UpperCAmelCase : Optional[int] = None
    _UpperCAmelCase : Tuple = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    _UpperCAmelCase : Union[str, Any] = original(__A , __A , __A , __A , __A , __A , __A )[0]
    _UpperCAmelCase : Tuple = original.generator(__A )
    _UpperCAmelCase : Union[str, Any] = new_model(
        __A , __A , __A , __A , __A )[0]
    _UpperCAmelCase : int = new_model.generator(__A )
    # Report the largest elementwise discrepancies.
    _UpperCAmelCase : Dict = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(__A ) )
    _UpperCAmelCase : Optional[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(__A ) )
    _UpperCAmelCase : Optional[Any] = torch.allclose(__A , __A , atol=1e-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and the parsed args to the
    # same throwaway name and then referenced the undefined `parser`, `args`
    # and `convert_bertabs_checkpoints` (the conversion routine above is
    # `_lowerCamelCase`); the names below make the script runnable.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    _lowerCamelCase(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 186
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __lowercase , unittest.TestCase ):
    '''Test suite for the BLOOM fast tokenizer.

    NOTE(review): the first base class was renamed `__lowercase` — from the
    imports above it is presumably `TokenizerTesterMixin`; confirm.  All
    class attributes below were renamed `_SCREAMING_SNAKE_CASE`, so each
    assignment overwrites the previous one and the mixin's real attribute
    names (tokenizer_class, rust_tokenizer_class, test_rust_tokenizer, ...)
    are never set.  Likewise every method is named `snake_case__`, so only
    the last definition survives on the class.  Code is kept byte-identical;
    only comments/docstrings were added.
    '''

    # NOTE(review): intended names, in order: tokenizer_class (None — no slow
    # tokenizer for Bloom), rust_tokenizer_class, tokenizer_class fallback,
    # test_rust_tokenizer, test_slow_tokenizer, from_pretrained_vocab_key,
    # special_tokens_map.
    _SCREAMING_SNAKE_CASE : int = None
    _SCREAMING_SNAKE_CASE : str = BloomTokenizerFast
    _SCREAMING_SNAKE_CASE : str = BloomTokenizerFast
    _SCREAMING_SNAKE_CASE : Any = True
    _SCREAMING_SNAKE_CASE : Optional[int] = False
    _SCREAMING_SNAKE_CASE : Optional[int] = "tokenizer_file"
    _SCREAMING_SNAKE_CASE : Optional[Any] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def snake_case__ ( self) -> Dict:
        '''Download the reference Bloom tokenizer and save it to the test dir.

        NOTE(review): the local was renamed `_UpperCAmelCase`, so the
        `tokenizer` read on the next line is unbound.
        '''
        super().setUp()
        _UpperCAmelCase : List[Any] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
        tokenizer.save_pretrained(self.tmpdirname)

    def snake_case__ ( self , **_A) -> List[Any]:
        '''Build a fast tokenizer from the saved test dir with the special
        tokens applied.

        NOTE(review): the keyword-args parameter was renamed `_A`, so the
        `kwargs` read below is unbound.
        '''
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_A)

    def snake_case__ ( self) -> List[str]:
        '''Round-trip two sentences through batch encode / decode against
        known-good token ids.

        NOTE(review): locals were collapsed into `_UpperCAmelCase`, so the
        `tokenizer` and `_A` reads below are unbound.
        '''
        _UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        _UpperCAmelCase : int = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
        _UpperCAmelCase : List[Any] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        _UpperCAmelCase : Any = tokenizer.batch_encode_plus(_A)['''input_ids''']
        self.assertListEqual(_A , _A)
        _UpperCAmelCase : Any = tokenizer.batch_decode(_A)
        self.assertListEqual(_A , _A)

    def snake_case__ ( self , _A=6) -> Optional[Any]:
        '''Check padding behaviour: encoding without padding works, and
        padding='max_length' raises once the pad token is removed.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                _UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                _UpperCAmelCase : List[Any] = '''This is a simple input'''
                _UpperCAmelCase : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
                _UpperCAmelCase : int = ('''This is a simple input''', '''This is a pair''')
                _UpperCAmelCase : Optional[int] = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_A , max_length=_A)
                    tokenizer_r.encode_plus(_A , max_length=_A)
                    tokenizer_r.batch_encode_plus(_A , max_length=_A)
                    tokenizer_r.encode(_A , max_length=_A)
                    tokenizer_r.batch_encode_plus(_A , max_length=_A)
                except ValueError:
                    self.fail('''Bloom Tokenizer should be able to deal with padding''')
                _UpperCAmelCase : Tuple = None  # Hotfixing padding = None
                self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''')
                # Simple input
                self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''')
                # Simple input
                self.assertRaises(
                    _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
                # Pair input
                self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''')
                # Pair input
                self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''')
                # Pair input
                self.assertRaises(
                    _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )

    def snake_case__ ( self) -> Any:
        '''Encode/decode one multilingual XNLI sample and check the decoded
        text round-trips exactly.'''
        _UpperCAmelCase : Dict = self.get_rust_tokenizer()
        _UpperCAmelCase : int = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=_A)
        _UpperCAmelCase : Tuple = next(iter(_A))['''premise''']  # pick up one data
        _UpperCAmelCase : List[Any] = list(sample_data.values())
        _UpperCAmelCase : Any = list(map(tokenizer.encode , _A))
        _UpperCAmelCase : List[str] = [tokenizer.decode(_A , clean_up_tokenization_spaces=_A) for x in output_tokens]
        self.assertListEqual(_A , _A)

    def snake_case__ ( self) -> Optional[Any]:
        '''Sanity-check the pretrained vocab file map is non-empty.'''
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 186
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCamelCase ( __a ):
if num <= 0:
SCREAMING_SNAKE_CASE_ = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = [True] * (num + 1)
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = int(math.sqrt(__a ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__a )
# Set multiples of start be False
for i in range(start * start, num + 1, __a ):
if sieve[i] is True:
SCREAMING_SNAKE_CASE_ = False
start += 1
for j in range(end + 1, num + 1 ):
if sieve[j] is True:
prime.append(__a )
return prime
if __name__ == "__main__":
    # NOTE(review): the sieve above is named `_lowerCamelCase`;
    # `prime_sieve` is not defined in this module.
    print(_lowerCamelCase(int(input('Enter a positive integer: ').strip())))
| 626
|
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCamelCase ( __a, __a, __a ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def _lowerCamelCase(image, lang, tesseract_config):
    """Run Tesseract OCR on *image* and return (words, normalized_boxes),
    where each box is scaled to the 0-1000 LayoutLM coordinate system.

    NOTE(review): the original declared `__a` three times (a SyntaxError)
    and collapsed all locals into one name; the names below are
    reconstructed from the reads.  `to_pil_image` and `pytesseract` come
    from the imports at the top of the file.  The sibling box normalizer is
    not reachable under the name `normalize_box` in this module, so it is
    inlined here.
    """

    def _normalize(box, width, height):
        # Scale (x0, y0, x1, y1) pixel coordinates to the 0-1000 range.
        return [
            int(1_000 * (box[0] / width)),
            int(1_000 * (box[1] / height)),
            int(1_000 * (box[2] / width)),
            int(1_000 * (box[3] / height)),
        ]

    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = [_normalize(box, image_width, image_height) for box in actual_boxes]
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class snake_case ( __lowercase ):
    '''Image processor (LayoutLM-style): resize / rescale / normalize images
    and optionally run Tesseract OCR to extract words and boxes.

    NOTE(review): the base class was renamed `__lowercase` (presumably
    `BaseImageProcessor` from the imports above — confirm).  All four
    helper methods below are named `_lowercase`, so each definition shadows
    the previous one; most parameters were renamed `SCREAMING_SNAKE_CASE_`,
    which makes the signatures duplicate-argument SyntaxErrors, and the
    attribute assignments read names (`size`, `do_resize`, ...) those
    signatures never bind.  Code is kept byte-identical; only
    comments/docstrings were added.
    '''

    # Output key produced by this processor.
    UpperCAmelCase__ = ['''pixel_values''']

    def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "" , **SCREAMING_SNAKE_CASE_ , ):
        '''Store the default preprocessing configuration.

        NOTE(review): intended parameters, in order: do_resize, size,
        resample, do_rescale, rescale_factor, do_normalize, image_mean,
        image_std, apply_ocr, ocr_lang, tesseract_config, **kwargs.
        '''
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = do_resize
        SCREAMING_SNAKE_CASE_ = size
        SCREAMING_SNAKE_CASE_ = resample
        SCREAMING_SNAKE_CASE_ = do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_value
        SCREAMING_SNAKE_CASE_ = do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
        SCREAMING_SNAKE_CASE_ = apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        '''Resize an image to the requested {"height", "width"} size dict.'''
        SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        SCREAMING_SNAKE_CASE_ = (size['''height'''], size['''width'''])
        return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        '''Rescale pixel values by a scalar factor (e.g. 1/255).'''
        return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        '''Normalize an image with the given per-channel mean and std.'''
        return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
        '''Full preprocessing pipeline: validate inputs, optionally run OCR,
        then resize/rescale/normalize and pack a BatchFeature.

        NOTE(review): the per-call overrides below read parameter names the
        mangled signature never binds; the OCR words/boxes results are bound
        to the collapsed name instead of `data["words"]` / `data["boxes"]`.
        '''
        SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ = apply_ocr if apply_ocr is not None else self.apply_ocr
        SCREAMING_SNAKE_CASE_ = ocr_lang if ocr_lang is not None else self.ocr_lang
        SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else self.tesseract_config
        SCREAMING_SNAKE_CASE_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
        if not valid_images(SCREAMING_SNAKE_CASE_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            SCREAMING_SNAKE_CASE_ = []
            SCREAMING_SNAKE_CASE_ = []
            for image in images:
                SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                words_batch.append(SCREAMING_SNAKE_CASE_ )
                boxes_batch.append(SCREAMING_SNAKE_CASE_ )
        if do_resize:
            SCREAMING_SNAKE_CASE_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
        SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
        SCREAMING_SNAKE_CASE_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE_ )
        if apply_ocr:
            SCREAMING_SNAKE_CASE_ = words_batch
            SCREAMING_SNAKE_CASE_ = boxes_batch
        return data
| 626
| 1
|
import os
from distutils.util import strtobool
def lowercase_(env_keys, default):
    '''Return the value of the first environment variable in *env_keys* that
    parses as a non-negative int; fall back to *default*.

    NOTE(review): the original declared `_UpperCamelCase` twice (a
    SyntaxError) and dropped the assignment target read as `val`; both are
    restored here.
    '''
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def lowercase_(key, default=False):
    '''Parse a boolean flag from environment variable *key* (missing ->
    *default*).

    NOTE(review): the original declared `_UpperCamelCase` twice (a
    SyntaxError); names restored from the body.
    '''
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def lowercase_(key, default="no"):
    '''Return the raw string value of environment variable *key*
    (missing -> str(*default*)).

    NOTE(review): the original declared `_UpperCamelCase` twice (a
    SyntaxError); names restored from the body.
    '''
    value = os.environ.get(key, str(default))
    return value
| 527
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowercase_(*objects):
    '''Drop the given references (set each slot to None), run the garbage
    collector and empty the active accelerator cache; returns the list of
    cleared slots so callers can rebind their variables.

    NOTE(review): the original called `isinstance(x, x)` (a TypeError at
    runtime) and read the unbound name `objects` in its return; the
    reconstruction mirrors accelerate's `release_memory`.  `is_xpu_available`
    / `is_npu_available` come from the project import at the top of the file.
    '''
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def lowercase_(exception):
    '''Return True iff *exception* is an out-of-memory style RuntimeError
    whose single message argument matches a known OOM signature.

    NOTE(review): the original read the unbound names `exception` and
    `_statements` and called `isinstance` with a non-type second argument;
    the RuntimeError check matches the accelerate implementation this file
    mirrors — confirm.
    '''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def lowercase_(function=None, starting_batch_size=128):
    '''Decorator that retries *function*, halving the batch size passed as
    its (implicit) first argument after every OOM-style failure, starting
    from *starting_batch_size*.

    NOTE(review): the original declared `_UpperCamelCase` twice (a
    SyntaxError); names are restored from the body.  `is_xpu_available`,
    `is_npu_available` and `should_reduce_batch_size` are referenced as in
    the original, but the latter is not defined under that name in this
    module (the helper above is also called `lowercase_`) — TODO restore
    the original helper names.
    '''
    if function is None:
        # Support usage as @decorator(starting_batch_size=N).
        return functools.partial(lowercase_, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: batch_size is injected by the decorator,
        # so the caller must not also pass it positionally.
        if len(params) < (len(args) + 1):
            arg_str = ''', '''.join([F'{arg}={value}' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                F'Batch size was passed into `{function.__name__}` as the first argument when called.'
                F'Remove this as the decorator already does so: `{function.__name__}({arg_str})`')
        while True:
            if batch_size == 0:
                raise RuntimeError('''No executable batch size found, reached zero.''')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
| 527
| 1
|
def a_(price: float, tax_rate: float) -> float:
    """Return *price* increased by *tax_rate* (e.g. 0.25 for a 25% tax).

    NOTE(review): the original declared `lowerCAmelCase_` twice (a
    SyntaxError); parameter names reconstructed from the expression.
    """
    return price * (1 + tax_rate)
if __name__ == "__main__":
    # NOTE(review): `price_plus_tax` is not defined in this module; the
    # function above is `a_`.
    print(F"""{a_(100, 0.25) = }""")
    print(F"""{a_(1_25.50, 0.05) = }""")
| 53
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase(string1, string2) -> str | Literal[False]:
    """Compare two equal-length bit strings.

    Returns *string1* with '_' at the single differing position if the
    strings differ in at most one bit, otherwise False.

    NOTE(review): the original declared `_a` twice (a SyntaxError), compared
    a list against itself and dropped the `lista[i] = '_'` target; all are
    restored here.
    """
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '''_'''
    if count > 1:
        return False
    else:
        return "".join(lista)
def UpperCamelCase(binary) -> list[str]:
    """Iteratively extract the prime implicants from the list of *binary*
    minterm strings (Quine-McCluskey combination step).

    NOTE(review): the original read the unbound names `k`, `checka`, `pi`,
    `temp` and `binary` (assignment targets were lost) and called a
    `compare_string` that this module does not define under that name; the
    comparison is inlined below and the targets restored.  The behaviour
    (including appending the literal 'X' markers) mirrors the original
    statements exactly.
    """

    def _compare_string(stra, strb):
        # Merge strings differing in at most one bit; False otherwise.
        lista = list(stra)
        listb = list(strb)
        count = 0
        for i in range(len(lista)):
            if lista[i] != listb[i]:
                count += 1
                lista[i] = '''_'''
        if count > 1:
            return False
        return "".join(lista)

    pi = []
    while True:
        checka = ['''$'''] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = _compare_string(binary[i], binary[j])
                if k is False:
                    checka[i] = '''*'''
                    checka[j] = '''*'''
                    temp.append('''X''')
        # Strings never marked combined are prime implicants.
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def UpperCamelCase(no_of_variable, minterms) -> list[str]:
    """Render each minterm as a fixed-width (*no_of_variable* bits) binary
    string, most-significant bit first.

    NOTE(review): the original declared `_a` twice (a SyntaxError) and lost
    the `string` / `temp` assignment targets; restored from the reads.
    """
    temp = []
    for minterm in minterms:
        string = ''''''
        for _ in range(no_of_variable):
            # Prepend the next lowest bit.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def UpperCamelCase(string1, string2, count) -> bool:
    """Return True iff *string1* and *string2* differ in exactly *count*
    positions (used to match a prime implicant against a minterm).

    NOTE(review): the original declared `_a` three times (a SyntaxError) and
    compared a list against itself; restored here.
    """
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def UpperCamelCase(chart, prime_implicants) -> list[str]:
    """Select essential (and then greedily-covering) prime implicants from
    the coverage *chart* (chart[i][j] == 1 iff implicant i covers minterm j).

    The chart is modified in place: covered columns are zeroed out.

    NOTE(review): the original declared `_a` twice (a SyntaxError) and lost
    most assignment targets (`select`, `rem`, `chart[k][j]`, `max_n`, ...);
    the reconstruction follows the reads in the original statements.
    """
    temp = []
    select = [0] * len(chart)
    # Pass 1: mark implicants that are the sole cover of some column.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take every essential implicant and zero the columns it covers.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Pass 2: greedily take the implicant covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def UpperCamelCase(prime_implicants, binary) -> list[list[int]]:
    """Build the 0/1 coverage chart: chart[i][j] == 1 iff prime implicant i
    covers minterm j (they differ in exactly as many positions as the
    implicant has '_' wildcards).

    NOTE(review): the original declared `_a` twice (a SyntaxError) and
    called an `is_for_table` this module does not define under that name;
    the check is inlined below.
    """

    def _is_for_table(stra, strb, count):
        # True iff the strings differ in exactly `count` positions.
        diff = 0
        for a, b in zip(stra, strb):
            if a != b:
                diff += 1
        return diff == count

    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('''_''')
        for j in range(len(binary)):
            if _is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def UpperCamelCase ( ) -> None:
    '''Interactive Quine-McCluskey driver: read the variable count and the
    minterms, then print the prime implicants and the essential prime
    implicants.

    NOTE(review): `decimal_to_binary`, `check`, `prime_implicant_chart` and
    `selection` are not defined under those names in this module (the
    helpers above are all named `UpperCamelCase`), the locals were collapsed
    to `lowercase_`, and the comprehension reads `_a` while its loop
    variable is `x` — this driver raises NameError as written.  Code is kept
    byte-identical; only comments were added.
    '''
    lowercase_ :Dict = int(input('''Enter the no. of variables\n''' ) )
    lowercase_ :int = [
        float(_a )
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
    lowercase_ :Tuple = decimal_to_binary(_a , _a )
    lowercase_ :str = check(_a )
    print('''Prime Implicants are:''' )
    print(_a )
    lowercase_ :Union[str, Any] = prime_implicant_chart(_a , _a )
    lowercase_ :int = selection(_a , _a )
    print('''Essential Prime Implicants are:''' )
    print(_a )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is not defined in this module (the interactive
    # driver above is named `UpperCamelCase`); this call raises NameError as
    # written — confirm the intended entry point.
    main()
| 257
| 0
|
'''simple docstring'''
from math import ceil, sqrt
def __snake_case ( UpperCAmelCase_ : int = 1000000 ):
lowerCamelCase_ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase_ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase_ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
    # NOTE(review): the solver above is named `__snake_case`; `solution` is
    # not defined in this module.
    print(f'''{__snake_case() = }''')
| 714
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class snake_case ( unittest.TestCase ):
    """Configuration holder for the DETR image-processor tests: stores the
    processing options and computes the output shapes the processor should
    produce for given inputs.

    NOTE(review): every ``__init__`` parameter after ``self`` was renamed
    ``UpperCamelCase`` (a duplicate-argument SyntaxError), so the attribute
    assignments below read names (`batch_size`, `num_channels`, ...) that
    the signature never binds.  Code is kept byte-identical; only
    comments/docstrings were added.
    """

    def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=30 , UpperCamelCase=400 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=1 / 255 , UpperCamelCase=True , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=[0.5, 0.5, 0.5] , UpperCamelCase=True , ):
        """Store the test configuration (intended params: parent, batch_size,
        num_channels, min_resolution, max_resolution, do_resize, size,
        do_rescale, rescale_factor, do_normalize, image_mean, image_std,
        do_pad)."""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = min_resolution
        lowerCamelCase_ = max_resolution
        lowerCamelCase_ = do_resize
        lowerCamelCase_ = size
        lowerCamelCase_ = do_rescale
        lowerCamelCase_ = rescale_factor
        lowerCamelCase_ = do_normalize
        lowerCamelCase_ = image_mean
        lowerCamelCase_ = image_std
        lowerCamelCase_ = do_pad

    def snake_case ( self ):
        """Return the processor kwargs dict built from this configuration."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def snake_case ( self , UpperCamelCase , UpperCamelCase=False ):
        """Compute the (height, width) the processor's shortest-edge resize
        should yield for one image, or the batch-wide maxima when batched.

        NOTE(review): both result targets were collapsed to `lowerCamelCase_`
        (originally e.g. `w, h = image.size`), so the `w`/`h` reads below
        are unbound as written.
        """
        if not batched:
            lowerCamelCase_ = image_inputs[0]
            if isinstance(UpperCamelCase , Image.Image ):
                lowerCamelCase_ ,lowerCamelCase_ = image.size
            else:
                lowerCamelCase_ ,lowerCamelCase_ = image.shape[1], image.shape[2]
            if w < h:
                # Width is the shortest edge: scale height proportionally.
                lowerCamelCase_ = int(self.size["shortest_edge"] * h / w )
                lowerCamelCase_ = self.size["shortest_edge"]
            elif w > h:
                lowerCamelCase_ = self.size["shortest_edge"]
                lowerCamelCase_ = int(self.size["shortest_edge"] * w / h )
            else:
                # Square input: both edges become shortest_edge.
                lowerCamelCase_ = self.size["shortest_edge"]
                lowerCamelCase_ = self.size["shortest_edge"]
        else:
            # Batched: take the per-image expectations and return the maxima
            # (the processor pads the batch to the largest image).
            lowerCamelCase_ = []
            for image in image_inputs:
                lowerCamelCase_ ,lowerCamelCase_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
            lowerCamelCase_ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = DetrImageProcessor if is_vision_available() else None
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def snake_case ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(UpperCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase , "size" ) )
self.assertTrue(hasattr(UpperCamelCase , "do_pad" ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
"""simple docstring"""
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase_ ,lowerCamelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
@slow
def snake_case ( self ):
"""simple docstring"""
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase_ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase ) )
# verify masks
lowerCamelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase ) )
| 445
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds a tiny Swinv2 config plus random inputs and runs shape assertions
    for each model head, on behalf of the unittest class below.

    NOTE(review): restored from a machine-mangled block whose ``__init__``
    repeated one parameter name (a SyntaxError) and bound arguments to throwaway
    locals instead of ``self``; attribute and method names follow the
    ``self.<attr>`` / ``self.model_tester.<method>`` reads elsewhere in this file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a SwinvaConfig from the tester's hyper-parameters."""
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # original read `path_norm=` — a typo PretrainedConfig absorbs
            # silently, leaving `patch_norm` at its default
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the bare SwinvaModel; check last_hidden_state shape."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence shrinks by 4x per stage after the first; width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction logits shape, including the greyscale (1-channel) path."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shape."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
    # NOTE(review): this class is machine-mangled — the name `A_`, the bases `_a`,
    # every method being named `_lowercase` (later defs shadow earlier ones), and
    # throwaway assignment targets (`_lowerCamelCase`) whose values later lines read
    # via the intended names (`config`, `inputs_dict`, `model`, `outputs`, ...).
    # Several signatures also repeat a parameter name, which is a SyntaxError as
    # written. Comments below record the evident intent only; restore identifiers
    # against the upstream Swinv2 model test rather than guessing here.
    # Model classes exercised by the shared mixin machinery (empty without torch).
    lowerCAmelCase__ = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    # Pipeline-task -> model-class mapping consumed by PipelineTesterMixin.
    lowerCAmelCase__ = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Mixin feature flags (all disabled for this model).
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    def _lowercase ( self: str ):
        '''simple docstring'''
        # setUp: build the shared model tester and the config tester.
        # NOTE(review): `__lowerCAmelCase` is undefined — presumably the config
        # class (SwinvaConfig); confirm upstream.
        _lowerCamelCase : Optional[int] = SwinvaModelTester(self )
        _lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,embed_dim=37 )
    def _lowercase ( self: List[str] ):
        '''simple docstring'''
        # Runs the standard ConfigTester battery (JSON round-trips, num_labels, init).
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _lowercase ( self: Any ):
        '''simple docstring'''
        # Basic forward-pass shape check via the model tester.
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def _lowercase ( self: Any ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def _lowercase ( self: List[str] ):
        '''simple docstring'''
        pass
    def _lowercase ( self: Optional[Any] ):
        '''simple docstring'''
        # Input embeddings should be an nn.Module; output embeddings Linear or None.
        _lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            _lowerCamelCase : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
    def _lowercase ( self: Any ):
        '''simple docstring'''
        # The forward signature's first argument must be `pixel_values`.
        _lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : List[str] = [*signature.parameters.keys()]
            _lowerCamelCase : Union[str, Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
    def _lowercase ( self: Optional[Any] ):
        '''simple docstring'''
        # Attention-output test: checks count and (heads, win^2, win^2) shape of the
        # per-layer attentions, via kwargs, via config, and together with hidden states.
        # NOTE(review): the bare `= True/False` assignments presumably targeted
        # `inputs_dict["output_attentions"]` / `config.return_dict` etc.; confirm upstream.
        _lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : List[Any] = True
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[int] = True
            _lowerCamelCase : Tuple = False
            _lowerCamelCase : int = True
            _lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
            _lowerCamelCase : int = outputs.attentions
            _lowerCamelCase : int = len(self.model_tester.depths )
            self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowerCamelCase : int = True
            _lowerCamelCase : Any = config.window_size**2
            _lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
            _lowerCamelCase : Dict = outputs.attentions
            self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            _lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
            # Check attention is always last and order is fine
            _lowerCamelCase : Tuple = True
            _lowerCamelCase : List[Any] = True
            _lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
            if hasattr(self.model_tester ,"num_hidden_states_types" ):
                _lowerCamelCase : Optional[int] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                _lowerCamelCase : Union[str, Any] = 2
            self.assertEqual(out_len + added_hidden_states ,len(__lowerCAmelCase ) )
            _lowerCamelCase : Optional[int] = outputs.attentions
            self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
    def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ):
        '''simple docstring'''
        # Helper: run one model class and verify hidden_states and
        # reshaped_hidden_states counts and shapes for a given image size.
        # NOTE(review): the four identical parameter names are a SyntaxError as
        # written; callers pass (config, inputs_dict, model_class, image_size).
        _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        with torch.no_grad():
            _lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
        _lowerCamelCase : int = outputs.hidden_states
        _lowerCamelCase : Optional[Any] = getattr(
            self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
        # Swinv2 has a different seq_length
        _lowerCamelCase : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        _lowerCamelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        _lowerCamelCase : int = outputs.reshaped_hidden_states
        self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
        _lowerCamelCase : Tuple = (
            reshaped_hidden_states[0].view(__lowerCAmelCase ,__lowerCAmelCase ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
    def _lowercase ( self: List[Any] ):
        '''simple docstring'''
        # hidden_states test at the tester's native image size (kwargs and config paths).
        _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = True
            self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : List[Any] = True
            self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
    def _lowercase ( self: int ):
        '''simple docstring'''
        # Same hidden_states test but with an image size that is not a multiple of
        # the patch size, so the model must pad to (padded_height, padded_width).
        _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Union[str, Any] = 3
        _lowerCamelCase : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        _lowerCamelCase : Dict = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        _lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        _lowerCamelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[int] = True
            self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Tuple = True
            self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
    def _lowercase ( self: Optional[Any] ):
        '''simple docstring'''
        # Masked-image-modeling head shape check.
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
    def _lowercase ( self: Any ):
        '''simple docstring'''
        # Image-classification head shape check.
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def _lowercase ( self: Optional[int] ):
        '''simple docstring'''
        # Smoke test: the first pretrained checkpoint loads successfully.
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : str = SwinvaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    def _lowercase ( self: Union[str, Any] ):
        '''simple docstring'''
        # With initializer ranges zeroed, every trainable non-embedding parameter
        # should be exactly 0.0 or 1.0 after init.
        _lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Tuple = _config_zero_init(__lowerCAmelCase )
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[int] = model_class(config=__lowerCAmelCase )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained Swinv2 classifier on a fixture
    image and pin the logits against known-good values.

    NOTE(review): restored from a mangled block; local names follow the reads in
    the original body (`model`, `inputs`, `outputs`) and `torch_device` replaces
    an undefined placeholder — confirm against the upstream test file.
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision deps are installed.
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 46
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """Return a random element of *lst* to serve as a quickselect pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of *lst* (1-indexed) via quickselect.

    Assumes the elements are distinct: the strict `<`/`>` partition drops
    duplicates of the pivot. Expected linear time; recursion depth is bounded
    by the partition quality.
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        # skip the pivot and everything below it
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 52
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader variant that raises on mappings with duplicate keys.

    NOTE(review): restored from a mangled block — the class name follows its use
    as ``Loader=_NoDuplicateSafeLoader`` below, and ``construct_mapping`` must
    carry its real name to override the SafeLoader hook.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        # Resolve each key node to its constructed Python value; lists are made
        # hashable as tuples so they can be counted.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _lowerCAmelCase ( _lowerCAmelCase ) -> Tuple[Optional[str], str]:
'''simple docstring'''
__snake_case = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
__snake_case = full_content[1:].index("---" ) + 1
__snake_case = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_lowerCAmelCase )
class DatasetMetadata(dict):
    """Dict-backed model of a dataset README's YAML metadata block.

    NOTE(review): restored from a mangled block — method names follow the call
    sites in this file (``from_yaml_string``, ``_to_readme``, ``to_yaml_string``,
    and the ``__main__`` block's ``from_readme`` / ``to_readme``); the ``dict``
    base is grounded by ``self.items()`` / ``cls(**metadata_dict)`` below.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML front-matter of the README at *path*."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            # No front-matter block: empty metadata.
            return cls()

    def to_readme(self, path: Path) -> None:
        """Write this metadata into the README at *path*, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return README text with this metadata as the YAML front-matter."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (duplicate keys rejected) into metadata fields."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, mapping dashed field names back to dashes.

        NOTE(review): ``sort_keys``/``allow_unicode`` values replace undefined
        placeholders in the mangled original; False/True match the emitted
        encoding round-trip — confirm upstream.
        """
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known pipeline task categories for dataset metadata validation; each maps to a
# (currently empty) list of allowed sub-task ids.
# NOTE(review): the variable name `A` looks machine-mangled (upstream likely uses
# a descriptive name such as `known_task_ids`); nothing visible in this file
# reads it, so it is kept as-is.
A : Dict = {
    'image-classification': [],
    'translation': [],
    'image-segmentation': [],
    'fill-mask': [],
    'automatic-speech-recognition': [],
    'token-classification': [],
    'sentence-similarity': [],
    'audio-classification': [],
    'question-answering': [],
    'summarization': [],
    'zero-shot-classification': [],
    'table-to-text': [],
    'feature-extraction': [],
    'other': [],
    'multiple-choice': [],
    'text-classification': [],
    'text-to-image': [],
    'text2text-generation': [],
    'zero-shot-image-classification': [],
    'tabular-classification': [],
    'tabular-regression': [],
    'image-to-image': [],
    'tabular-to-text': [],
    'unconditional-image-generation': [],
    'text-retrieval': [],
    'text-to-speech': [],
    'object-detection': [],
    'audio-to-audio': [],
    'text-generation': [],
    'conversational': [],
    'table-question-answering': [],
    'visual-question-answering': [],
    'image-to-text': [],
    'reinforcement-learning': [],
    'voice-activity-detection': [],
    'time-series-forecasting': [],
    'document-question-answering': [],
}
if __name__ == "__main__":
    # CLI entry point: validate (and rewrite) the YAML metadata block of a README.
    from argparse import ArgumentParser

    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    # Parsing raises if the metadata block is malformed (e.g. duplicate keys).
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    # Round-trip the metadata back into the README (normalizes the YAML block).
    dataset_metadata.to_readme(readme_filepath)
| 473
|
import base64
import baseaa
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Holds a DeepSpeed config (dict, JSON file path, or base64-encoded JSON
    string) and answers simple queries about it (ZeRO stage, offload, values by
    dotted key path).

    NOTE(review): restored from a mangled block — method names follow the
    internal call sites (``set_stage_and_offload``, ``get_value``,
    ``find_config_node``) and ``base64.urlsafe_b64decode`` replaces the
    non-existent ``baseaa.urlsafe_baadecode``.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        """Cache the ZeRO stage and whether optimizer/param offload is enabled."""
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ] )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Walk a dotted key path; return (parent_node_or_None, leaf_key)."""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted key path, or *default* if absent."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Remove the sub-tree at the dotted key path; optionally raise if missing."""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """True iff the value exists and is truthy (absent counts as False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """True iff the value exists and is falsy (absent counts as False)."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """Thin wrapper around a DeepSpeed engine whose ``backward`` also steps,
    because DeepSpeed fuses the optimizer step into its backward flow.

    NOTE(review): restored from a mangled block whose method signature repeated
    one parameter name (a SyntaxError as written).
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class UpperCamelCase( _a ):
    """Optimizer wrapper used when training runs under DeepSpeed.

    Stepping and zeroing are delegated to the DeepSpeed engine, so ``step``
    and ``zero_grad`` are intentionally no-ops here.

    NOTE(review): the base class ``_a`` is an obfuscated placeholder —
    presumably ``AcceleratedOptimizer`` upstream; confirm before relying on it.
    """

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Dict ) -> str:
        '''Wrap an optimizer; record whether it exposes an `overflow` flag.'''
        # NOTE(review): upstream this reads
        #   super().__init__(optimizer, device_placement=False, scaler=None)
        # — the obfuscation collapsed all three arguments onto the single
        # parameter name. Left byte-identical; needs restoring upstream.
        super().__init__(SCREAMING_SNAKE_CASE , device_placement=SCREAMING_SNAKE_CASE , scaler=SCREAMING_SNAKE_CASE )
        # NOTE(review): presumably `self.__has_overflow__ = hasattr(...)`;
        # as written the result is discarded into a local.
        __snake_case = hasattr(self.optimizer , "overflow" )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any]=None ) -> str:
        '''No-op zero_grad: DeepSpeed zeroes gradients inside engine.step().'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
        '''No-op step: DeepSpeed steps the optimizer inside engine.step().'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
        '''Whether the last optimizer step overflowed (fp16 loss scaling).'''
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class UpperCamelCase(_a):
    """Scheduler wrapper used when training runs under DeepSpeed.

    The engine steps the LR scheduler itself (see the engine wrapper above),
    so ``step`` is a deliberate no-op. Base class ``_a`` is an obfuscated
    placeholder — presumably ``AcceleratedScheduler`` upstream.
    """

    def __init__(self, scheduler, optimizers):
        # Obfuscation had both parameters share one name (a SyntaxError);
        # restored to the two-argument form the base wrapper expects.
        super().__init__(scheduler, optimizers)

    def step(self):
        """No-op: the DeepSpeed engine steps the scheduler."""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase:
    """Placeholder optimizer.

    Holds the parameters/hyper-parameters so a training script can run
    unchanged when the *real* optimizer is created from the DeepSpeed config
    instead of by user code.

    Args:
        params: iterable of model parameters (stored, not used).
        lr: learning rate to forward to the real optimizer.
        weight_decay: weight decay to forward to the real optimizer.
        **kwargs: any extra optimizer keyword arguments, kept verbatim.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        # Obfuscation gave every parameter the same name (a SyntaxError) and
        # assigned into throwaway locals; restored to plain attribute storage.
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class UpperCamelCase:
    """Placeholder LR scheduler.

    Mirrors the dummy optimizer above: stores the scheduler settings so the
    real scheduler can later be built from the DeepSpeed config.

    Args:
        optimizer: the (possibly dummy) optimizer this scheduler belongs to.
        total_num_steps: total number of training steps, if known.
        warmup_num_steps: number of warmup steps.
        **kwargs: any extra scheduler keyword arguments, kept verbatim.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        # Restored from obfuscated duplicate-parameter form (SyntaxError) to
        # plain attribute storage.
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 473
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    """Fixture that holds DetrImageProcessor settings and computes the
    resize shapes the processor is expected to produce.

    Restored from an obfuscated form whose ``__init__`` had every parameter
    share one name (a SyntaxError) and discarded all attribute assignments;
    the class and method names below are the ones the test class underneath
    actually calls (``DetrImageProcessingTester``,
    ``prepare_image_processor_dict``, ``get_expected_values``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # Mutable list defaults kept for parity with the HF test-suite style;
        # they are read-only here.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a DetrImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output.

        Non-batched: scale the first image so its shorter side equals
        ``size["shortest_edge"]``. Batched: per-image expected sizes, then the
        max height/width across the batch (images are padded to that size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # numpy / torch layout: (channels, height, width)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , unittest.TestCase ):
    """Integration tests for DetrImageProcessor: property/config checks,
    PIL/numpy/torch batching, and two slow COCO annotation-encoding tests.

    NOTE(review): obfuscation artifacts — every method shares the name
    ``__lowerCamelCase`` (so at runtime only the last definition survives),
    fixture results are discarded into locals, and the mixin base is the
    undefined placeholder ``lowerCamelCase__`` (presumably
    ``ImageProcessingSavingTestMixin``). Code left byte-identical.
    """

    # presumably named `image_processing_class` upstream
    lowercase : List[str] = DetrImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ) -> int:
        '''Create the shared tester fixture (presumably `setUp`).

        NOTE(review): the tester is assigned to a throwaway local; later
        methods read `self.image_processor_tester`, which this line should set.
        '''
        __UpperCamelCase : List[Any] = DetrImageProcessingTester(self )

    @property
    def __lowerCamelCase ( self ) -> Union[str, Any]:
        '''Shorthand for the tester's image-processor kwargs dict
        (presumably the `image_processor_dict` property).'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ) -> int:
        '''Every expected config attribute exists on the processor.'''
        __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "do_rescale" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "rescale_factor" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "do_pad" ) )

    def __lowerCamelCase ( self ) -> Optional[int]:
        '''`from_dict` honors defaults and legacy `size`/`max_size` kwargs.'''
        __UpperCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , __UpperCamelCase )
        __UpperCamelCase : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , __UpperCamelCase )

    def __lowerCamelCase ( self ) -> str:
        '''Intentionally skipped (presumably `test_batch_feature`).'''
        pass

    def __lowerCamelCase ( self ) -> List[Any]:
        '''PIL inputs: single and batched shapes match the expected resize.'''
        __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , Image.Image )
        # Test not batched input
        __UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
        __UpperCamelCase : Dict = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def __lowerCamelCase ( self ) -> Any:
        '''numpy inputs: single and batched shapes match the expected resize.'''
        __UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , np.ndarray )
        # Test not batched input
        __UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(__UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Dict = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def __lowerCamelCase ( self ) -> Dict:
        '''torch inputs: single and batched shapes match the expected resize.'''
        __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCamelCase , torch.Tensor )
        # Test not batched input
        __UpperCamelCase : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Union[str, Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def __lowerCamelCase ( self ) -> Optional[Any]:
        '''End-to-end COCO *detection* annotation encoding against pinned
        reference tensors (pixel values, areas, boxes, ids, sizes).'''
        # prepare image and target
        __UpperCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            __UpperCamelCase : Dict = json.loads(f.read() )
        __UpperCamelCase : Tuple = {"image_id": 3_97_69, "annotations": target}
        # encode them
        __UpperCamelCase : int = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
        __UpperCamelCase : List[Any] = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="pt" )
        # verify pixel values
        __UpperCamelCase : Dict = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
        __UpperCamelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
        # verify area
        __UpperCamelCase : Optional[int] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
        # verify boxes
        __UpperCamelCase : Optional[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
        __UpperCamelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1E-3 ) )
        # verify image_id
        __UpperCamelCase : Optional[int] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
        # verify is_crowd
        __UpperCamelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
        # verify class_labels
        __UpperCamelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
        # verify orig_size
        __UpperCamelCase : List[str] = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
        # verify size
        __UpperCamelCase : Any = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )

    @slow
    def __lowerCamelCase ( self ) -> List[Any]:
        '''End-to-end COCO *panoptic* annotation encoding (adds segmentation
        masks) against pinned reference tensors.'''
        # prepare image, target and masks_path
        __UpperCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            __UpperCamelCase : Tuple = json.loads(f.read() )
        __UpperCamelCase : Dict = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        __UpperCamelCase : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        __UpperCamelCase : Optional[Any] = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
        __UpperCamelCase : int = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="pt" )
        # verify pixel values
        __UpperCamelCase : Dict = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
        __UpperCamelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
        # verify area
        __UpperCamelCase : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
        # verify boxes
        __UpperCamelCase : List[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
        __UpperCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1E-3 ) )
        # verify image_id
        __UpperCamelCase : str = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
        # verify is_crowd
        __UpperCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
        # verify class_labels
        __UpperCamelCase : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
        # verify masks
        __UpperCamelCase : int = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCamelCase )
        # verify orig_size
        __UpperCamelCase : List[Any] = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
        # verify size
        __UpperCamelCase : Union[str, Any] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )
| 327
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output container for RobertaSeriesModelWithTransformation.

    Field names restored from the constructor calls below, which build this
    class with ``projection_state=..., last_hidden_state=...,
    hidden_states=..., attentions=...`` keyword arguments (the obfuscated
    version named every field ``lowercase``, so they shadowed each other).
    """

    # projected hidden states from the transformation head
    projection_state: Optional[torch.FloatTensor] = None
    # raw encoder output, (batch, seq_len, hidden_size)
    last_hidden_state: torch.FloatTensor = None
    # per-layer hidden states when output_hidden_states=True
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # per-layer attention maps when output_attentions=True
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta config extended with a projection head.

    Class name restored from the reference below
    (``config_class = RobertaSeriesConfig``); the obfuscated ``__init__`` had
    duplicate parameter names (a SyntaxError) and discarded all attribute
    assignments.

    Args:
        project_dim: output width of the projection/transformation head.
        pooler_fn: pooling strategy identifier (default ``"cls"``).
        learn_encoder: whether the encoder is trained.
        use_attention_mask: whether the model consumes an attention mask.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
    """XLM-Roberta encoder plus a linear transformation head, returning a
    TransformationModelOutput.

    NOTE(review): obfuscation artifacts — the class attributes below all share
    the name ``lowercase`` (presumably ``_keys_to_ignore_on_load_unexpected``,
    ``_keys_to_ignore_on_load_missing``, ``base_model_prefix`` and
    ``config_class``), and several ``__init__`` assignments land in throwaway
    locals instead of ``self.`` attributes (presumably ``self.base_model``,
    ``self.transformation``, ``self.has_pre_transformation``,
    ``self.transformation_pre`` and ``self.pre_LN``). The base class
    ``lowerCamelCase__`` is presumably ``RobertaPreTrainedModel`` (imported
    above). Code left byte-identical.
    """

    lowercase : Dict = [R'pooler', R'logit_scale']
    lowercase : Optional[int] = [R'position_ids', R'predictions.decoder.bias']
    lowercase : str = 'roberta'
    lowercase : int = RobertaSeriesConfig

    def __init__( self , __UpperCamelCase ) -> List[str]:
        '''Build the encoder, the projection head(s) and run post_init.'''
        super().__init__(__UpperCamelCase )
        # NOTE(review): the next three assignments discard their results; the
        # forward pass below reads self.base_model / self.has_pre_transformation.
        __UpperCamelCase : List[Any] = XLMRobertaModel(__UpperCamelCase )
        __UpperCamelCase : int = nn.Linear(config.hidden_size , config.project_dim )
        __UpperCamelCase : str = getattr(__UpperCamelCase , "has_pre_transformation" , __UpperCamelCase )
        if self.has_pre_transformation:
            # extra projection + LayerNorm applied to the second-to-last layer
            __UpperCamelCase : int = nn.Linear(config.hidden_size , config.project_dim )
            __UpperCamelCase : Optional[Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def __lowerCamelCase ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , ) -> Dict:
        '''Forward pass (presumably named `forward`): run the encoder, then
        project either the penultimate layer (pre-transformation mode) or the
        last hidden state into a TransformationModelOutput.'''
        __UpperCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        __UpperCamelCase : Optional[Any] = self.base_model(
            input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , position_ids=__UpperCamelCase , head_mask=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__UpperCamelCase , )
        if self.has_pre_transformation:
            # second-to-last layer -> LayerNorm -> pre-transformation head
            __UpperCamelCase : Any = outputs["hidden_states"][-2]
            __UpperCamelCase : Tuple = self.pre_LN(__UpperCamelCase )
            __UpperCamelCase : str = self.transformation_pre(__UpperCamelCase )
            return TransformationModelOutput(
                projection_state=__UpperCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            __UpperCamelCase : int = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=__UpperCamelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 327
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model (a Switch-Transformer
    style MoE decoder).

    Restored from an obfuscated form whose ``__init__`` gave every parameter
    the same name (a SyntaxError) and discarded all attribute assignments.
    The base class is ``PretrainedConfig`` — imported at the top of this file
    but previously replaced by the undefined placeholder ``_a``.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1E-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # total depth = sparse (switch/MoE) layers followed by extra dense layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 703
|
'''simple docstring'''
from math import isqrt


def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number P_k = k(3k - 1) / 2.

    ``n`` is pentagonal iff ``1 + 24n`` is a perfect square whose root ``r``
    satisfies ``(1 + r) % 6 == 0``. Integer ``math.isqrt`` replaces the
    original float ``** 0.5`` so the check stays exact for large ``n``.
    """
    disc = 1 + 24 * n
    root = isqrt(disc)
    return root * root == disc and (1 + root) % 6 == 0


def solution(limit: int = 5_000) -> int:
    """Project Euler 44: among the first ``limit - 1`` pentagonal numbers,
    find a pair whose sum and difference are both pentagonal and return that
    difference; return -1 when no such pair exists in the window.

    Both defs in the original were named ``__snake_case`` (the second
    shadowed the first), while the call sites use ``is_pentagonal`` and
    ``solution`` — names restored accordingly.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        # j starts at i as in the original; the i == j pair is harmless
        # because a difference of 0 is never pentagonal.
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
| 301
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring -> replacement map applied to every original CLAP state-dict key
# to match the HF ClapModel naming scheme.
# NOTE(review): presumably named KEYS_TO_MODIFY_MAPPING (the function below
# reads that name); both module constants here are assigned to the same
# obfuscated name `UpperCamelCase`, so the second assignment clobbers the dict.
UpperCamelCase = {
    'text_branch': 'text_model',
    'audio_branch': 'audio_model.audio_encoder',
    'attn': 'attention.self',
    'self.proj': 'output.dense',
    'attention.self_mask': 'attn_mask',
    'mlp.fc1': 'intermediate.dense',
    'mlp.fc2': 'output.dense',
    'norm1': 'layernorm_before',
    'norm2': 'layernorm_after',
    'bn0': 'batch_norm',
}
# Shared feature extractor used by the conversion; note this performs a hub
# download (or cache hit) at import time.
UpperCamelCase = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original (laion) CLAP model from a checkpoint.

    Restored from an obfuscated form with duplicate parameter names (a
    SyntaxError) and undefined locals; the call site below uses
    ``init_clap(..., enable_fusion=...)``.

    Returns:
        (model, model_cfg): the CLAP model and its config dict, as produced
        by ``CLAP.create_model``.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """Translate an original CLAP state dict into HF ClapModel naming.

    Three transformations are applied per key:
      1. substring renames from ``KEYS_TO_MODIFY_MAPPING``;
      2. ``sequential.<i>.`` -> ``layers.<i//3>.linear.`` (every third entry
         of the nn.Sequential is a Linear layer);
      3. ``_projection.<i>.`` -> ``_projection.linear{1|2}.``;
    and fused audio ``qkv`` weights are split into separate query/key/value
    tensors.
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        # BUGFIX: the original condition was `if "audio" and "qkv" in key:`;
        # the literal "audio" is always truthy, so only the qkv test ran.
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert an original CLAP checkpoint into an HF ClapModel directory.

    Restored from an obfuscated form in which all four parameters shared one
    name (a SyntaxError); the ``__main__`` block below calls this function
    positionally with (checkpoint, dump_folder, config, enable_fusion).
    Note ``config_path`` is accepted for CLI compatibility but not used.
    """
    clap_model, _clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # presumably `transformers_config.audio_config.enable_fusion` upstream;
    # the fusion flag must reach the audio encoder config — confirm attribute.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/output/config paths plus the fusion
    # flag and run the conversion.
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    # NOTE(review): both assignments above/below target the obfuscated name
    # `UpperCamelCase` while the code reads `parser` and `args` — presumably
    # `parser = argparse.ArgumentParser()` and `args = parser.parse_args()`.
    UpperCamelCase = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 61
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# Gate the real UnCLIP pipelines on torch + transformers>=4.25 being
# installed; otherwise expose dummy placeholder objects that raise a helpful
# error on use, so `import` of this package never fails outright.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 514
| 0
|
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowercase = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Return a dataclasses field whose default is the given list.

    Wrapping ``default`` in a factory satisfies dataclasses' ban on mutable
    defaults. Note the factory returns the *same* ``default`` object on every
    call (it does not copy), matching the original behavior. Restored from an
    obfuscated form that referenced the undefined name ``a__`` and whose
    parameters did not match the ``list_field(default=..., metadata=...)``
    call sites in the dataclass below.
    """
    return field(default_factory=lambda: default, metadata=metadata)


# Backward-compatible alias for the previous (obfuscated) public name.
A = list_field
@dataclass
class UpperCAmelCase_ :
    '''Command-line arguments for the (deprecated) HF benchmark utilities.

    NOTE(review): obfuscation artifacts — every field below is named
    ``_lowercase`` (so only the last survives at runtime); presumably the
    originals were, in order: models, batch_sizes, sequence_lengths,
    inference, cuda, tpu, fp16, training, verbose, speed, memory,
    trace_memory_line_by_line, save_to_csv, log_print, env_print,
    multi_process, inference_time_csv_file, inference_memory_csv_file,
    train_time_csv_file, train_memory_csv_file, env_info_csv_file,
    log_filename, repeat, only_pretrain_model. ``_SCREAMING_SNAKE_CASE`` is
    an undefined placeholder default (presumably a bool). Code left
    byte-identical.
    '''

    _lowercase : List[str] = list_field(
        default=[] , metadata={
            '''help''': (
                '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
                ''' of all available models'''
            )
        } , )
    _lowercase : Tuple = list_field(
        default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
    _lowercase : Tuple = list_field(
        default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
    _lowercase : Optional[Any] = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
    _lowercase : List[str] = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
    _lowercase : Optional[int] = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
    _lowercase : Any = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
    _lowercase : Any = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} )
    _lowercase : Union[str, Any] = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} )
    _lowercase : Any = field(
        default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
    _lowercase : str = field(
        default=_SCREAMING_SNAKE_CASE , metadata={
            '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
        } , )
    _lowercase : Tuple = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} )
    _lowercase : List[Any] = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} )
    _lowercase : Union[str, Any] = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} )
    _lowercase : Dict = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} )
    _lowercase : Tuple = field(
        default=_SCREAMING_SNAKE_CASE , metadata={
            '''help''': (
                '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
                ''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
                ''' for debugging / testing and on TPU.'''
            )
        } , )
    # The csv-filename defaults below are frozen at class-definition time
    # (round(time()) is evaluated once, when the module is imported).
    _lowercase : Dict = field(
        default=f'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
    _lowercase : int = field(
        default=f'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
    _lowercase : List[str] = field(
        default=f'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
    _lowercase : Union[str, Any] = field(
        default=f'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
    _lowercase : int = field(
        default=f'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
    _lowercase : Tuple = field(
        default=f'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
    _lowercase : Union[str, Any] = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
    _lowercase : Dict = field(
        default=_SCREAMING_SNAKE_CASE , metadata={
            '''help''': (
                '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
                ''' model weights.'''
            )
        } , )

    def _lowercase ( self ):
        """Emit the deprecation warning (presumably `__post_init__`).

        NOTE(review): `lowercase_` in the warn() call below is an undefined
        placeholder (presumably the warning category, e.g. FutureWarning).
        """
        warnings.warn(
            F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , lowercase_ , )

    def _lowercase ( self ):
        """Serialize this instance to indented JSON (presumably `to_json_string`)."""
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def _lowercase ( self ):
        """Validated list of model identifiers (presumably `model_names`)."""
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models

    @property
    def _lowercase ( self ):
        """Whether multiprocess measurement is enabled (disabled on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
| 716
|
'''simple docstring'''
from __future__ import annotations
def A(__lowerCamelCase: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort and return it.

    Fixes a generation artifact: the hole-filling assignments had lost their
    subscripts (``holes[index] = i`` and ``array[index] = holes[i]``), so the
    input list was never actually modified.

    >>> A([5, 3, 1, 2])
    [1, 2, 3, 5]
    >>> A([])
    []
    """
    if len(__lowerCamelCase) == 0:
        return __lowerCamelCase
    _min, _max = min(__lowerCamelCase), max(__lowerCamelCase)
    # One pigeonhole per distinct value in [_min, _max].
    holes_range = _max - _min + 1
    holes = [0] * holes_range
    holes_repeat = [0] * holes_range
    # Drop every element into its hole, counting duplicates.
    for value in __lowerCamelCase:
        index = value - _min
        holes[index] = value
        holes_repeat[index] += 1
    # Write the holes back into the input list in ascending order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            __lowerCamelCase[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    return __lowerCamelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix generation artifact: the raw input string and the parsed list were both
    # rebound to `_lowercase`, and the sort was invoked under the undefined names
    # `pigeon_sort`/`unsorted`.  The sorting function in this file is `A`.
    user_input = input("""Enter numbers separated by comma:\n""")
    unsorted = [int(x) for x in user_input.split(""",""")]
    print(A(unsorted))
| 162
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
    """Holds the hyperparameters used to exercise PoolFormerImageProcessor.

    Fixes generation artifacts: the original ``__init__`` declared every
    parameter as ``_A`` (duplicate argument names are a SyntaxError) and bound
    the values to locals instead of ``self`` attributes, which would break
    ``prepare_image_processor_dict`` and the consumers below.  Parameter names
    are restored from the attribute reads in this file; order and defaults are
    unchanged, so positional callers behave identically.  The two list defaults
    become ``None`` sentinels to avoid shared mutable defaults (same values).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Fall back to the historical defaults when not supplied.
        self.size = size if size is not None else {'shortest_edge': 30}
        self.crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
    """PoolFormerImageProcessor tests: attribute presence, ``from_dict``
    overrides, and output shapes for PIL, NumPy and torch inputs.

    NOTE(review): code-generation artifacts — the base-class name
    ``_lowerCAmelCase`` is presumably ImageProcessingSavingTestMixin (imported
    above); every method is named ``__lowerCamelCase`` so later defs shadow
    earlier ones; results are bound to throwaway locals while assertions read
    the original names (``image_processor``, ``encoded_images``, ``_lowercase``).
    Restore original identifiers before running.
    """

    # Class under test, or None when vision dependencies are unavailable.
    _UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None

    def __lowerCamelCase ( self ):
        '''Instantiate the hyperparameter helper shared by the suite.'''
        _lowerCAmelCase : str = PoolFormerImageProcessingTester(self )

    @property
    def __lowerCamelCase ( self ):
        '''Image-processor kwargs derived from the tester.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def __lowerCamelCase ( self ):
        '''Processor exposes all expected configuration attributes.'''
        _lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowercase ,'do_resize_and_center_crop' ) )
        self.assertTrue(hasattr(_lowercase ,'size' ) )
        self.assertTrue(hasattr(_lowercase ,'crop_pct' ) )
        self.assertTrue(hasattr(_lowercase ,'do_normalize' ) )
        self.assertTrue(hasattr(_lowercase ,'image_mean' ) )
        self.assertTrue(hasattr(_lowercase ,'image_std' ) )

    def __lowerCamelCase ( self ):
        '''``from_dict`` honours defaults and explicit size/crop_size overrides.'''
        _lowerCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'shortest_edge': 30} )
        self.assertEqual(image_processor.crop_size ,{'height': 30, 'width': 30} )
        _lowerCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )

    def __lowerCamelCase ( self ):
        '''Intentionally empty placeholder.'''
        pass

    def __lowerCamelCase ( self ):
        '''PIL input: single image and batch produce NCHW tensors at crop size.'''
        _lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase ,Image.Image )
        # Test not batched input
        _lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        _lowerCAmelCase : Optional[Any] = image_processing(_lowercase ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)

    def __lowerCamelCase ( self ):
        '''NumPy input: same shape checks as the PIL case.'''
        _lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,numpify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase ,np.ndarray )
        # Test not batched input
        _lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        _lowerCAmelCase : int = image_processing(_lowercase ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)

    def __lowerCamelCase ( self ):
        '''Torch input: same shape checks as the PIL case.'''
        _lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowercase ,torchify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase ,torch.Tensor )
        # Test not batched input
        _lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        _lowerCAmelCase : Optional[int] = image_processing(_lowercase ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
| 259
|
"""simple docstring"""
def lowercase__(snake_case_):  # noqa: E741
    """Find the articulation points (cut vertices) of an undirected graph.

    The graph is an adjacency list mapping each vertex ``0..n-1`` to its
    neighbours.  Articulation points are printed (preserving the original
    behaviour) and also returned as a sorted list so callers can use the
    result programmatically (backward-compatible: previous return was None).

    Fixes a generation artifact: every subscripted assignment
    (``visited[at] = True``, ``low[at] = ...``, ``is_art[...] = True``) had
    collapsed into a bare local rebinding, leaving the function with no effect.
    """
    n = len(snake_case_)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count DFS-tree edges leaving the root; the root is an articulation
        # point only when it has more than one tree child.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at  # NOTE: uses the vertex label as its discovery index
        for to in snake_case_[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # Root rule overrides any cycle-based marking made during the DFS.
            is_art[i] = out_edge_count > 1

    points = [x for x in range(n) if is_art[x] is True]
    for x in points:
        print(x)
    return points
# Adjacency list of the undirected demo graph (vertex -> neighbours).
_lowercase = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
# Fix generation artifact: the call referenced the undefined names `compute_ap`
# and `data`; the articulation-point routine above is `lowercase__` and the
# graph is bound to `_lowercase`.
lowercase__(_lowercase)
| 49
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class a ( unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark: inference/training time and memory on
    tiny checkpoints, CSV artefact creation, and line-by-line memory tracing.

    NOTE(review): code-generation artifacts — all test methods are named
    ``__lowerCamelCase`` (later defs shadow earlier ones) and results are bound
    to throwaway ``snake_case__`` locals while subsequent lines read the
    original names (``MODEL_ID``, ``benchmark``, ``results``, ``config``).
    Restore the original identifiers before running.
    """

    # Helper: assert every (batch_size, sequence_length) cell of each model's
    # result table is populated.  NOTE(review): `results` is undefined here —
    # the parameter was presumably named `results` originally.
    def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] ,model_result['''ss'''] ):
                snake_case__ : Any = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(__lowercase )

    # Inference benchmark on tiny-gpt2 by model id only.
    def __lowerCamelCase ( self :Optional[int] ):
        snake_case__ : List[Any] = '''sshleifer/tiny-gpt2'''
        snake_case__ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : List[Any] = PyTorchBenchmark(__lowercase )
        snake_case__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark loading only pretrained weights (only_pretrain_model).
    def __lowerCamelCase ( self :Any ):
        snake_case__ : int = '''sgugger/tiny-distilbert-classification'''
        snake_case__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,only_pretrain_model=__lowercase ,)
        snake_case__ : Dict = PyTorchBenchmark(__lowercase )
        snake_case__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark with torchscript enabled.
    def __lowerCamelCase ( self :Optional[int] ):
        snake_case__ : str = '''sshleifer/tiny-gpt2'''
        snake_case__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,torchscript=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : List[Any] = PyTorchBenchmark(__lowercase )
        snake_case__ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # FP16 inference benchmark; skipped on CPU (no half-precision support).
    @unittest.skipIf(torch_device == '''cpu''' ,'''Cant do half precision''' )
    def __lowerCamelCase ( self :str ):
        snake_case__ : str = '''sshleifer/tiny-gpt2'''
        snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,fpaa=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : Any = PyTorchBenchmark(__lowercase )
        snake_case__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark when config.architectures is None (pretrain head only).
    def __lowerCamelCase ( self :Tuple ):
        snake_case__ : List[str] = '''sshleifer/tiny-gpt2'''
        snake_case__ : Tuple = AutoConfig.from_pretrained(__lowercase )
        # set architectures equal to `None`
        snake_case__ : str = None
        snake_case__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : str = PyTorchBenchmark(__lowercase ,configs=[config] )
        snake_case__ : int = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training benchmark on tiny-gpt2.
    def __lowerCamelCase ( self :Dict ):
        snake_case__ : List[str] = '''sshleifer/tiny-gpt2'''
        snake_case__ : Any = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : Union[str, Any] = PyTorchBenchmark(__lowercase )
        snake_case__ : int = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # FP16 training benchmark; skipped on CPU.
    @unittest.skipIf(torch_device == '''cpu''' ,'''Can\'t do half precision''' )
    def __lowerCamelCase ( self :Union[str, Any] ):
        snake_case__ : str = '''sshleifer/tiny-gpt2'''
        snake_case__ : Any = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=__lowercase ,multi_process=__lowercase ,)
        snake_case__ : Optional[int] = PyTorchBenchmark(__lowercase )
        snake_case__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Inference benchmark with an explicitly supplied config object.
    def __lowerCamelCase ( self :str ):
        snake_case__ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
        snake_case__ : Dict = AutoConfig.from_pretrained(__lowercase )
        snake_case__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : Any = PyTorchBenchmark(__lowercase ,configs=[config] )
        snake_case__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Inference benchmark for an encoder-decoder model (tinier_bart) with config.
    def __lowerCamelCase ( self :Optional[Any] ):
        snake_case__ : Optional[int] = '''sshleifer/tinier_bart'''
        snake_case__ : Tuple = AutoConfig.from_pretrained(__lowercase )
        snake_case__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : List[str] = PyTorchBenchmark(__lowercase ,configs=[config] )
        snake_case__ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    # Training benchmark with an explicitly supplied config object.
    def __lowerCamelCase ( self :List[Any] ):
        snake_case__ : Dict = '''sshleifer/tiny-gpt2'''
        snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__lowercase )
        snake_case__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : str = PyTorchBenchmark(__lowercase ,configs=[config] )
        snake_case__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # Training benchmark for tinier_bart with config.
    def __lowerCamelCase ( self :Union[str, Any] ):
        snake_case__ : str = '''sshleifer/tinier_bart'''
        snake_case__ : Optional[int] = AutoConfig.from_pretrained(__lowercase )
        snake_case__ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__lowercase ,)
        snake_case__ : Union[str, Any] = PyTorchBenchmark(__lowercase ,configs=[config] )
        snake_case__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    # All five CSV artefacts are written when save_to_csv is enabled.
    def __lowerCamelCase ( self :Optional[int] ):
        snake_case__ : int = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : str = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,save_to_csv=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__lowercase ,'''inf_time.csv''' ) ,train_memory_csv_file=os.path.join(__lowercase ,'''train_mem.csv''' ) ,inference_memory_csv_file=os.path.join(__lowercase ,'''inf_mem.csv''' ) ,train_time_csv_file=os.path.join(__lowercase ,'''train_time.csv''' ) ,env_info_csv_file=os.path.join(__lowercase ,'''env.csv''' ) ,multi_process=__lowercase ,)
            snake_case__ : Optional[int] = PyTorchBenchmark(__lowercase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(__lowercase ,'''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__lowercase ,'''train_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__lowercase ,'''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__lowercase ,'''train_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__lowercase ,'''env.csv''' ) ).exists() )

    # Line-by-line memory tracing produces populated summaries and a log file.
    def __lowerCamelCase ( self :int ):
        snake_case__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(__lowercase :int ):
            self.assertTrue(hasattr(__lowercase ,'''sequential''' ) )
            self.assertTrue(hasattr(__lowercase ,'''cumulative''' ) )
            self.assertTrue(hasattr(__lowercase ,'''current''' ) )
            self.assertTrue(hasattr(__lowercase ,'''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case__ : Tuple = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,training=__lowercase ,inference=__lowercase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__lowercase ,'''log.txt''' ) ,log_print=__lowercase ,trace_memory_line_by_line=__lowercase ,multi_process=__lowercase ,)
            snake_case__ : List[str] = PyTorchBenchmark(__lowercase )
            snake_case__ : int = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(__lowercase ,'''log.txt''' ) ).exists() )
| 219
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a :
    """Builds tiny LlamaConfig models and input tensors for the model tests below.

    NOTE(review): code-generation artifacts — ``__init__`` and several
    ``create_and_check_*`` methods declare every parameter as ``__lowercase``
    (duplicate argument names are a SyntaxError), and method bodies reference
    the original names (``parent``, ``batch_size``, ``config`` …).  The
    original distinct parameter names must be restored before this is usable;
    they cannot be recovered unambiguously from the bodies alone.
    """

    def __init__( self :int ,__lowercase :Union[str, Any] ,__lowercase :Optional[int]=1_3 ,__lowercase :Dict=7 ,__lowercase :Union[str, Any]=True ,__lowercase :List[Any]=True ,__lowercase :Tuple=False ,__lowercase :Optional[int]=True ,__lowercase :Optional[int]=9_9 ,__lowercase :Optional[Any]=3_2 ,__lowercase :Union[str, Any]=5 ,__lowercase :Dict=4 ,__lowercase :Optional[Any]=3_7 ,__lowercase :Optional[int]="gelu" ,__lowercase :Optional[int]=0.1 ,__lowercase :Dict=0.1 ,__lowercase :str=5_1_2 ,__lowercase :str=1_6 ,__lowercase :Optional[Any]=2 ,__lowercase :Union[str, Any]=0.02 ,__lowercase :Optional[int]=3 ,__lowercase :Optional[Any]=4 ,__lowercase :Any=None ,):
        # Store every hyperparameter on self for use by the methods below.
        snake_case__ : Union[str, Any] = parent
        snake_case__ : Any = batch_size
        snake_case__ : Dict = seq_length
        snake_case__ : Tuple = is_training
        snake_case__ : List[str] = use_input_mask
        snake_case__ : int = use_token_type_ids
        snake_case__ : List[str] = use_labels
        snake_case__ : List[str] = vocab_size
        snake_case__ : str = hidden_size
        snake_case__ : List[Any] = num_hidden_layers
        snake_case__ : Any = num_attention_heads
        snake_case__ : Optional[int] = intermediate_size
        snake_case__ : Union[str, Any] = hidden_act
        snake_case__ : Optional[int] = hidden_dropout_prob
        snake_case__ : Tuple = attention_probs_dropout_prob
        snake_case__ : Optional[Any] = max_position_embeddings
        snake_case__ : Optional[Any] = type_vocab_size
        snake_case__ : List[Any] = type_sequence_label_size
        snake_case__ : List[str] = initializer_range
        snake_case__ : List[Any] = num_labels
        snake_case__ : List[Any] = num_choices
        snake_case__ : Optional[int] = scope

    # Build random ids/masks/labels plus a config for one test invocation.
    def __lowerCamelCase ( self :Tuple ):
        snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        snake_case__ : Tuple = None
        if self.use_input_mask:
            snake_case__ : str = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case__ : Tuple = None
        if self.use_token_type_ids:
            snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        snake_case__ : List[Any] = None
        snake_case__ : Union[str, Any] = None
        snake_case__ : int = None
        if self.use_labels:
            snake_case__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            snake_case__ : Dict = ids_tensor([self.batch_size] ,self.num_choices )
        snake_case__ : Union[str, Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Tiny LlamaConfig mirroring the stored hyperparameters.
    def __lowerCamelCase ( self :Optional[Any] ):
        return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)

    # Forward pass through the bare LlamaModel; checks output shape.
    def __lowerCamelCase ( self :int ,__lowercase :List[str] ,__lowercase :int ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Optional[int] ):
        snake_case__ : int = LlamaModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase )
        snake_case__ : Union[str, Any] = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    # Decoder-style forward with encoder hidden states / cross-attention mask.
    def __lowerCamelCase ( self :int ,__lowercase :str ,__lowercase :Optional[Any] ,__lowercase :Tuple ,__lowercase :int ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :Any ,__lowercase :Dict ,__lowercase :List[Any] ,):
        snake_case__ : List[str] = True
        snake_case__ : Union[str, Any] = LlamaModel(__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : Optional[int] = model(
            __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,)
        snake_case__ : str = model(
            __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,)
        snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    # Causal-LM head forward with labels; checks logits shape.
    def __lowerCamelCase ( self :Optional[int] ,__lowercase :int ,__lowercase :int ,__lowercase :Tuple ,__lowercase :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Dict ,__lowercase :List[Any] ,):
        snake_case__ : Optional[int] = LlamaForCausalLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : int = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    # Verifies past_key_values caching produces the same hidden states as a
    # full forward pass over the concatenated sequence.
    def __lowerCamelCase ( self :Any ,__lowercase :Tuple ,__lowercase :str ,__lowercase :int ,__lowercase :Any ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :List[str] ,):
        snake_case__ : int = True
        snake_case__ : Union[str, Any] = True
        snake_case__ : List[str] = LlamaForCausalLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        # first forward pass
        snake_case__ : Tuple = model(
            __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,use_cache=__lowercase ,)
        snake_case__ : str = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        snake_case__ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and
        snake_case__ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
        snake_case__ : Dict = torch.cat([input_mask, next_mask] ,dim=-1 )
        snake_case__ : Dict = model(
            __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
        snake_case__ : Any = model(
            __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
        # select random slice
        snake_case__ : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )

    # Repackage prepare_config_and_inputs() into the common (config, dict) form.
    def __lowerCamelCase ( self :Dict ):
        snake_case__ : Any = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) , (
                snake_case__
            ) ,
        ) : int = config_and_inputs
        snake_case__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """Common model/pipeline/generation tests for the Llama family.

    NOTE(review): the three mixin bases are all written as ``__lowerCamelCase``
    — presumably ModelTesterMixin, GenerationTesterMixin and PipelineTesterMixin
    (imported above); confirm against the original file.
    """

    __lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    __lowerCAmelCase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
    __lowerCAmelCase : List[str] = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase : str = False
    __lowerCAmelCase : Any = False

    # Build the model tester and a ConfigTester for LlamaConfig.
    def __lowerCamelCase ( self :List[Any] ):
        snake_case__ : Any = LlamaModelTester(self )
        snake_case__ : Dict = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )

    def __lowerCamelCase ( self :Dict ):
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self :Tuple ):
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )

    # Re-run the model check for every position-embedding variant.
    def __lowerCamelCase ( self :Optional[Any] ):
        snake_case__ : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case__ : Union[str, Any] = type
            self.model_tester.create_and_check_model(*__lowercase )

    # Sequence classification with single-integer labels.
    def __lowerCamelCase ( self :List[str] ):
        snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Dict = 3
        snake_case__ : Union[str, Any] = input_dict['''input_ids''']
        snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase )
        snake_case__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : List[str] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    # Sequence classification with problem_type="single_label_classification".
    def __lowerCamelCase ( self :str ):
        snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Union[str, Any] = 3
        snake_case__ : List[Any] = '''single_label_classification'''
        snake_case__ : Tuple = input_dict['''input_ids''']
        snake_case__ : Optional[int] = input_ids.ne(1 ).to(__lowercase )
        snake_case__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        snake_case__ : Dict = LlamaForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    # Sequence classification with problem_type="multi_label_classification"
    # (float multi-hot labels).
    def __lowerCamelCase ( self :Optional[int] ):
        snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Any = 3
        snake_case__ : Optional[int] = '''multi_label_classification'''
        snake_case__ : str = input_dict['''input_ids''']
        snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase )
        snake_case__ : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        snake_case__ : Optional[int] = LlamaForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def __lowerCamelCase ( self :Dict ):
        pass

    # RoPE scaling: short inputs match only for "dynamic" scaling; long inputs
    # must always differ from the unscaled model.
    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def __lowerCamelCase ( self :Optional[int] ,__lowercase :Tuple ):
        snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : Any = ids_tensor([1, 1_0] ,config.vocab_size )
        snake_case__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        snake_case__ : Any = LlamaModel(__lowercase )
        original_model.to(__lowercase )
        original_model.eval()
        snake_case__ : Any = original_model(__lowercase ).last_hidden_state
        snake_case__ : Any = original_model(__lowercase ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        snake_case__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
        snake_case__ : str = LlamaModel(__lowercase )
        scaled_model.to(__lowercase )
        scaled_model.eval()
        snake_case__ : List[str] = scaled_model(__lowercase ).last_hidden_state
        snake_case__ : Dict = scaled_model(__lowercase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
@require_torch
class a ( unittest.TestCase ):
    """Slow integration checks of Llama-2 checkpoints against recorded logits/text.

    NOTE(review): the previous revision assigned every value to one placeholder
    name and then read names that were never bound (``model``, ``out``,
    ``input_ids``, ...), so the bodies would raise NameError if the skips were
    ever lifted.  Real names are restored below and the ``test_`` prefixes make
    the methods discoverable by unittest.  All expected tensors and strings are
    kept byte-for-byte.
    """

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def test_model_7b_logits(self):
        # Fixed probe-prompt token ids shared by the logits tests below.
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''', device_map='''auto''')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''', device_map='''auto''')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''', device_map='''auto''')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        # NOTE(review): this compares out.mean(-1) against the 30-dim slice;
        # kept as-is since the test is skipped — confirm against upstream.
        torch.testing.assert_close(out.mean(-1), EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''', device_map='''auto''')
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip('''Model is curently gated''' )
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        prompt = '''Simply put, the theory of relativity states that '''
        tokenizer = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''')
        input_ids = tokenizer.encode(prompt, return_tensors='''pt''')
        model = LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''', device_map='''sequential''', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 219
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds tiny MRA configs/inputs and runs shape checks for each head.

    NOTE(review): restored from a mangled revision in which ``__init__`` had
    duplicate parameter names (a SyntaxError), every method shared one name
    (so only the last survived), and no ``self.*`` attribute was ever
    assigned.  Method names are grounded by the callers in the test class
    below (``prepare_config_and_inputs``, ``create_and_check_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized by the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build the tiny encoder (non-decoder) MraConfig used by all checks."""
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        """Variant config with a larger vocab, as used by the pipeline tests."""
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, plus cross-attention inputs."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three call signatures the model supports.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input to (batch, num_choices, seq) as the MC head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test-suite: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    """Runs the common model test-suite over all tiny MRA model heads.

    NOTE(review): restored from a mangled revision in which every method was
    named identically (only the last survived) and the flag/class attributes
    were unreadable placeholders.  The flag names below follow the upstream
    MRA test suite — confirm against it.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    """Slow checks of pretrained MRA checkpoints against recorded outputs.

    NOTE(review): local names (`model`, `input_ids`, `output`, ...) and the
    ``test_`` method names were restored — the previous revision bound every
    value to one placeholder name and would raise NameError when run.
    Expected tensors are kept byte-for-byte.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 27
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """Builds tiny GPT-J configs/inputs and checks cached autoregressive decoding.

    NOTE(review): restored from a mangled revision whose ``__init__`` had
    duplicate parameter names (a SyntaxError) and never assigned ``self.*``.
    The class and method names are grounded by the callers in the test class
    below (``FlaxGPTJModelTester``, ``prepare_config_and_inputs``,
    ``check_use_cache_forward*``).
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # GPT-J reuses the last vocab id for all special tokens in these tests.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask) with random token ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test-suite: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Decoding with an init_cache'd past must match a plain forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='''i4''')
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"""Max diff is {diff}""")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same cached/uncached equivalence, but with an explicit padded mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        # Extend the mask with zeros out to the cache length.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"""Max diff is {diff}""")
@require_flax
class FlaxGPTJModelTest( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    """Common + PT/Flax cross-equivalence tests for GPT-J.

    NOTE(review): restored from a mangled revision in which every value was
    bound to one placeholder name while the code read unbound names
    (``config``, ``pt_model``, ``fx_outputs``, ...), and the unittest
    lifecycle names (``setUp``, ``test_*``) were destroyed.
    """

    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPTaTokenizer.from_pretrained('''gpt2''', pad_token='''<|endoftext|>''', padding_side='''left''')
        inputs = tokenizer(['''Hello this is a long string''', '''Hey'''], return_tensors='''np''', padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''')
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs['''input_ids'''].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                # Left-pad each row at a random point so both frameworks see
                # the same non-trivial attention mask.
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['''attention_mask'''][batch_idx, :start_index] = 0
                    pt_inputs['''attention_mask'''][batch_idx, start_index:] = 1
                    prepared_inputs_dict['''attention_mask'''][batch_idx, :start_index] = 0
                    prepared_inputs_dict['''attention_mask'''][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), '''Output lengths differ between Flax and PyTorch''')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), '''Output lengths differ between Flax and PyTorch''')
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4E-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs['''input_ids'''].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['''attention_mask'''][batch_idx, :start_index] = 0
                    pt_inputs['''attention_mask'''][batch_idx, start_index:] = 1
                    prepared_inputs_dict['''attention_mask'''][batch_idx, :start_index] = 0
                    prepared_inputs_dict['''attention_mask'''][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), '''Output lengths differ between Flax and PyTorch''')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), '''Output lengths differ between Flax and PyTorch''')
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4E-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 319
| 0
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class a ( Trainer ):
    """Deprecated drop-in alias for :class:`Trainer`, kept for SageMaker users.

    NOTE(review): the previous revision subclassed an undefined placeholder
    name (the file imports ``Trainer`` for exactly this purpose), used the
    same name for a positional parameter and ``**kwargs`` (a SyntaxError),
    and passed a non-Warning object as the ``warnings.warn`` category.
    """

    def __init__(self, args=None, **kwargs):
        """Emit a deprecation warning, then defer entirely to ``Trainer``."""
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
    """Slow Flax Stable Diffusion 2 integration checks against recorded slices.

    NOTE(review): restored from a mangled revision in which every value was
    bound to one placeholder name while the code read unbound names
    (``sd_pipe``, ``prompt``, ``images``, ...), and the lifecycle/test
    method names were destroyed (the first method calls ``super().tearDown()``
    so it must itself be named ``tearDown``).  Expected arrays are kept
    byte-for-byte.
    """

    def tearDown(self):
        # Free accelerator memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""", revision="""bf16""", dtype=jnp.bfloat16, )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        # Replicate params / shard inputs across devices for pmapped inference.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = """stabilityai/stable-diffusion-2"""
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="""scheduler""")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="""bf16""", dtype=jnp.bfloat16, )
        params["""scheduler"""] = scheduler_params
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 36
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _A ( AbstractDatasetInputStream ):
    """Dataset input stream backed by a Python generator function.

    NOTE(review): restored from a mangled revision in which ``__init__``
    reused one parameter name for every argument (a SyntaxError), never
    assigned ``self.builder`` (read below), and the reader returned an
    unbound ``dataset`` name.  The base class is the
    ``AbstractDatasetInputStream`` imported at the top of the file, and the
    reader is restored to the abstract ``read`` API that callers invoke.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        # Builder that materializes the generator into an Arrow dataset.
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )

    def read(self):
        """Build and return the dataset ("train" split), streaming or map-style."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 402
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
    """Scrape an Amazon.in search results page for *__magic_name__* and return a
    DataFrame of product title, link, price, rating, MRP and discount.

    NOTE(review): local names were mangled — every assignment targets
    ``snake_case__``, so the names read later (``product``, ``soup``,
    ``product_mrp``, ``product_price``, ``product_title`` ... ``data_frame``)
    are unbound and this function raises NameError. Also ``requests.get`` is
    called with the parameter as both URL and headers, and ``item.ha`` looks
    like a garbled ``item.h2`` — all need restoring.
    """
    # Search URL (f-string references the unbound name ``product``).
    snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
    # Browser-like headers to avoid being served the bot page.
    snake_case__ : List[str] = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
    # Initialize a Pandas dataframe with the column titles
    snake_case__ : Optional[Any] = DataFrame(
        columns=[
            """Product Title""",
            """Product Link""",
            """Current Price of the product""",
            """Product Rating""",
            """MRP of the product""",
            """Discount""",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        try:
            # Title, link and displayed price of the result card.
            snake_case__ : Optional[int] = item.ha.text
            snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
            snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
            try:
                snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
            except AttributeError:
                # Some products carry no rating element.
                snake_case__ : Optional[int] = """Not available"""
            try:
                # Strike-through list price (MRP), rebuilt with the rupee sign.
                snake_case__ : Tuple = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
                )
            except AttributeError:
                snake_case__ : Optional[Any] = """"""
            try:
                # Percentage discount = (MRP - price) / MRP * 100.
                snake_case__ : str = float(
                    (
                        (
                            float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                            - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
                        )
                        / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                    )
                    * 1_00 )
            except ValueError:
                snake_case__ : List[Any] = float("""nan""" )
        except AttributeError:
            # Skip result cards missing the expected markup.
            pass
        # Append the scraped row (presumably ``data_frame.loc[...]`` originally).
        snake_case__ : str = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        snake_case__ : List[Any] = """ """
        snake_case__ : Union[str, Any] = """ """
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Scrape data for one product and dump it to CSV. The previous code called
    # the undefined names ``get_amazon_product_data`` and ``product`` (the
    # scraper above is named ``UpperCamelCase__`` and the constant is ``A_``),
    # raising NameError at runtime.
    A_ : str = "headphones"
    UpperCamelCase__(A_).to_csv(F'Amazon Product Data for {A_}.csv')
| 38
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A ( unittest.TestCase ):
    """Helper that builds BertConfig objects and dummy inputs for Flax BERT tests.

    NOTE(review): heavy name mangling — ``__init__`` declares every parameter
    as ``snake_case_`` (duplicate argument names are a SyntaxError), every
    assignment targets the local ``_a`` instead of ``self.<attr>``, and all
    three helper methods share the name ``__lowerCAmelCase`` so the later
    definitions shadow the earlier ones. The attributes read below
    (``self.batch_size`` etc.) are therefore never set.
    """

    def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ) -> Optional[Any]:
        # Intended to store model hyper-parameters on the instance; each line
        # below should read ``self.<name> = <param>`` but binds ``_a`` instead.
        _a = parent
        _a = batch_size
        _a = seq_length
        _a = is_training
        _a = use_attention_mask
        _a = use_token_type_ids
        _a = use_labels
        _a = vocab_size
        _a = hidden_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = intermediate_size
        _a = hidden_act
        _a = hidden_dropout_prob
        _a = attention_probs_dropout_prob
        _a = max_position_embeddings
        _a = type_vocab_size
        _a = type_sequence_label_size
        _a = initializer_range
        _a = num_choices

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Build (config, input_ids, token_type_ids, attention_mask) test inputs."""
        _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a = None
        if self.use_attention_mask:
            _a = random_attention_mask([self.batch_size, self.seq_length] )
        _a = None
        if self.use_token_type_ids:
            _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _a = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def __lowerCAmelCase ( self ) -> Tuple:
        """Package the prepared inputs into the dict shape the common tests expect."""
        _a = self.prepare_config_and_inputs()
        _a , _a , _a , _a = config_and_inputs
        _a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Variant that also creates encoder states for decoder/cross-attn tests."""
        _a = self.prepare_config_and_inputs()
        _a , _a , _a , _a = config_and_inputs
        _a = True
        _a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class A ( a , unittest.TestCase ):
    """Flax BERT model test suite driven by the tester helper above.

    NOTE(review): the mixin base class ``a`` is an undefined name (presumably
    ``FlaxModelTesterMixin``), the two class attributes share the mangled name
    ``__UpperCAmelCase`` so the first is shadowed, and ``setUp`` references
    ``FlaxBertModelTester`` which is not defined in this module (the helper
    class above was renamed to ``A``) and binds it to a throwaway local.
    """

    __UpperCAmelCase : List[Any] = True
    # Model classes under test (only available when flax is installed).
    __UpperCAmelCase : Tuple = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def __lowerCAmelCase ( self ) -> Tuple:
        # Intended as setUp: instantiate the tester helper for this suite.
        _a = FlaxBertModelTester(self )

    @slow
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        _a = FlaxBertModel.from_pretrained("bert-base-cased" )
        _a = model(np.ones((1, 1) ) )
        self.assertIsNotNone(snake_case_ )
| 691
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    # Small SentencePiece model shipped with the test fixtures.
    __snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# M2M100 language-code token ids (English / French). NOTE(review): both
# constants were renamed to the same ``__snake_case`` identifier, while the
# tests below reference them as ``EN_CODE`` and ``FR_CODE`` — those names are
# unbound as written.
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
    """Unit tests for ``MaMaaaTokenizer`` built on the common tokenizer mixin.

    NOTE(review): the mixin base ``a`` is an undefined name (presumably
    ``TokenizerTesterMixin``); the four class attributes all share the mangled
    name ``__UpperCAmelCase`` so only the last assignment survives; most
    methods assign to the throwaway local ``_a`` and then read the original
    names (``vocab``, ``save_dir``, ``tokenizer`` ...), which are unbound.
    """

    __UpperCAmelCase : List[Any] = MaMaaaTokenizer
    __UpperCAmelCase : int = False
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Tuple = True

    def __lowerCAmelCase ( self ) -> Any:
        """setUp: write a tiny vocab + spm model and save a tokenizer to tmpdir."""
        super().setUp()
        _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        _a = Path(self.tmpdirname )
        save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __lowerCAmelCase ( self , **snake_case_ ) -> str:
        """Factory: load the tokenizer saved in setUp with extra kwargs."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )

    def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
        """Return (input_text, output_text) used by round-trip mixin tests."""
        return (
            "This is a test",
            "This is a test",
        )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Token </s> maps to id 0 and back."""
        _a = "</s>"
        _a = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """Vocab ordering and size sanity checks."""
        _a = self.get_tokenizer()
        _a = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip("Skip this test while all models are still to be uploaded." )
    def __lowerCAmelCase ( self ) -> Any:
        pass

    def __lowerCAmelCase ( self ) -> Dict:
        """Tokenize / ids / detokenize round trip on a short sentence."""
        _a = self.get_tokenizer()
        _a = tokenizer.tokenize("This is a test" )
        self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
        _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        _a = tokenizer.convert_tokens_to_string(snake_case_ )
        self.assertEqual(snake_case_ , "This is a test" )

    @slow
    def __lowerCAmelCase ( self ) -> List[Any]:
        """Golden-encoding check against the published m2m100_418M checkpoint."""
        # fmt: off
        _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
    """Integration tests for the published ``facebook/m2m100_418M`` tokenizer.

    NOTE(review): name mangling — the five class attributes all share the name
    ``__UpperCAmelCase`` (only the last survives) while the tests read
    ``self.checkpoint_name`` / ``self.src_text`` / ``self.tgt_text`` /
    ``self.expected_src_tokens``; ``EN_CODE`` / ``FR_CODE`` refer to the
    module constants that were renamed to ``__snake_case``; several methods
    assign to the throwaway local ``_a`` and then read the original names.
    """

    __UpperCAmelCase : Any = """facebook/m2m100_418M"""
    __UpperCAmelCase : Dict = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    __UpperCAmelCase : Optional[Any] = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    __UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]

    @classmethod
    def __lowerCAmelCase ( cls ) -> int:
        """setUpClass: load the checkpoint tokenizer with en->fr language pair."""
        _a = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        _a = 1
        return cls

    def __lowerCAmelCase ( self ) -> Any:
        """Known language-code ids resolve correctly."""
        self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
        self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
        self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
        self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Vocab size and special entries."""
        _a = self.tokenizer.get_vocab()
        self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )

    def __lowerCAmelCase ( self ) -> List[str]:
        """Encoding the English source matches the expected token ids."""
        _a = "en"
        _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , snake_case_ )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """Decoding skips special tokens and drops the leading language code."""
        self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
        # fmt: off
        _a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        _a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
        _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertNotIn(self.tokenizer.eos_token , snake_case_ )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Save/reload round trip preserves the language-token mapping."""
        _a = tempfile.mkdtemp()
        _a = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(snake_case_ )
        _a = MaMaaaTokenizer.from_pretrained(snake_case_ )
        self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )

    @require_torch
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Batch encoding places language codes / EOS where fairseq expects them."""
        _a = "en"
        _a = "fr"
        _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
        _a = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            _a = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """Switching tgt_lang updates prefix/suffix special tokens."""
        _a = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        _a = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    @require_torch
    def __lowerCAmelCase ( self ) -> List[Any]:
        """Target/input mode switching toggles the language prefix accordingly."""
        _a = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        _a = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )

    @require_torch
    def __lowerCAmelCase ( self ) -> int:
        """Translation inputs carry the forced BOS id of the target language."""
        _a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(snake_case_ ) , {
                # en_XX, A, test, EOS
                "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 1_2_8_0_0_6,
            } , )
| 691
| 1
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
# Map each tapex submodule to the public names it exposes; used to build a
# lazy module so the tokenizer is only imported on first attribute access.
lowerCamelCase : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Install the lazy proxy in place of this module. The previous code passed
    # the undefined name ``_import_structure`` (NameError) and bound the proxy
    # to a throwaway variable instead of replacing the entry in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCamelCase)
| 367
|
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __lowerCAmelCase ( __snake_case = "" ):
__lowerCAmelCase = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__lowerCAmelCase = BeautifulSoup(requests.get(__snake_case ).text , "html.parser" )
__lowerCAmelCase = soup.find_all("td" , attrs="titleColumn" )
__lowerCAmelCase = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__snake_case , __snake_case )
}
def __lowerCAmelCase ( __snake_case = "IMDb_Top_250_Movies.csv" ):
__lowerCAmelCase = get_imdb_top_aaa_movies()
with open(__snake_case , "w" , newline="" ) as out_file:
__lowerCAmelCase = csv.writer(__snake_case )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
    # Run the CSV writer defined above. The previous code called the undefined
    # name ``write_movies`` (NameError); the writer is the last definition
    # bound to the shared ``__lowerCAmelCase`` name.
    __lowerCAmelCase()
| 367
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Autoformer package: submodule -> public names.
_lowerCAmelCase = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

# Modeling code requires torch; only advertise it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Previously this list *overwrote* the import-structure dict, discarding
    # the configuration entry; register it under its submodule key instead.
    _lowerCAmelCase['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module. The previous code passed the undefined name
    # ``_import_structure`` and never replaced the module in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _lowerCAmelCase, module_spec=__spec__)
| 706
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy import structure for the GPT-Neo package: submodule -> public names.
_lowerCAmelCase = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}

# Torch modeling code is optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Previously this list *overwrote* the import-structure dict; register it
    # under its submodule key instead.
    _lowerCAmelCase['modeling_gpt_neo'] = [
        'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoForCausalLM',
        'GPTNeoForQuestionAnswering',
        'GPTNeoForSequenceClassification',
        'GPTNeoForTokenClassification',
        'GPTNeoModel',
        'GPTNeoPreTrainedModel',
        'load_tf_weights_in_gpt_neo',
    ]

# Flax modeling code is optional as well.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase['modeling_flax_gpt_neo'] = [
        'FlaxGPTNeoForCausalLM',
        'FlaxGPTNeoModel',
        'FlaxGPTNeoPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Install the lazy module. The previous code passed the undefined name
    # ``_import_structure`` and never replaced the module in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _lowerCAmelCase, module_spec=__spec__)
| 236
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCamelCase__ ( TaskTemplate ):
    """Task template describing a text-classification dataset layout.

    Maps a dataset's text column and label column onto the canonical
    ``text`` / ``labels`` schema. Fixes applied: the decorator received the
    undefined name ``lowerCAmelCase_`` and the base class was likewise
    undefined (the file imports ``TaskTemplate``); the five class attributes
    all shared one mangled name so only the last survived while methods read
    ``self.label_column`` etc.; the alignment method returned the unbound name
    ``task_template``; and the ``isinstance`` check used an undefined name
    instead of ``ClassLabel``.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    text_column: str = 'text'
    label_column: str = 'labels'

    def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Tuple ):
        """Return a copy of this template whose label schema uses the dataset's
        actual ``ClassLabel`` feature.

        Raises ValueError if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in UpperCamelCase__:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(UpperCamelCase__[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = UpperCamelCase__[self.label_column]
        # Write through __dict__ to bypass the frozen-dataclass assignment guard.
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """Map dataset column names to the canonical task columns.

        Renamed from the duplicated mangled method name so it no longer
        shadows the alignment method above.
        """
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 412
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Credentials and endpoints for the Hugging Face Hub CI instance used by the
# fixtures below. Fixes applied: every constant was bound to the same mangled
# name ``_lowerCAmelCase``, so ``CI_HUB_ENDPOINT`` (used two lines further
# down) and the names the fixtures reference (``CI_HUB_USER``,
# ``CI_HUB_USER_TOKEN`` ...) were undefined; the HFH URL template also had its
# ``{filename}`` placeholder replaced by corrupted text.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# huggingface_hub's HUGGINGFACE_CO_URL_TEMPLATE expects the placeholders
# repo_id / revision / filename.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
# NOTE(review): every fixture below was renamed to the same ``UpperCamelCase_``
# identifier, so each definition shadows the previous one and only the last
# function under that name is registered with pytest. Parameters were renamed
# to ``_snake_case`` while the bodies still read the original names
# (``monkeypatch``, ``previous_token``, ``hf_api``, ``repo_id``,
# ``cleanup_repo`` ...), which are unbound here. Distinct names must be
# restored before these fixtures can run.
@pytest.fixture
def UpperCamelCase_( _snake_case : str ):
    """Patch huggingface_hub's file-download URL template (needs ``monkeypatch``)."""
    monkeypatch.setattr(
        'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _snake_case )


@pytest.fixture
def UpperCamelCase_( _snake_case : Any ):
    """Point the datasets library at the CI hub endpoint and datasets URL."""
    monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _snake_case )
    monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _snake_case )


@pytest.fixture
def UpperCamelCase_( _snake_case : int ):
    """Redirect ``HfFolder``'s token path so tests don't touch the real token."""
    monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _snake_case )


@pytest.fixture
def UpperCamelCase_( _snake_case : List[str] , _snake_case : List[Any] ):
    """Save the CI token for the duration of a test, then delete it."""
    HfFolder.save_token(_snake_case )
    yield
    HfFolder.delete_token()


@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
    """Session-scoped HfApi client (``_snake_case`` is unbound in this body)."""
    return HfApi(endpoint=_snake_case )


@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi ):
    """Swap in the CI user token for the session, restoring the previous one."""
    __a =HfFolder.get_token()
    HfFolder.save_token(_snake_case )
    yield CI_HUB_USER_TOKEN
    # NOTE(review): ``previous_token`` was the saved token, now bound to ``__a``.
    if previous_token is not None:
        HfFolder.save_token(_snake_case )


@pytest.fixture
def UpperCamelCase_( _snake_case : Optional[int] ):
    """Factory returning a callback that deletes a dataset repo on the CI hub."""
    def _cleanup_repo(_snake_case : Optional[int] ):
        hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )

    return _cleanup_repo


@pytest.fixture
def UpperCamelCase_( _snake_case : int ):
    """Factory returning a context manager that cleans a repo up on exit."""
    @contextmanager
    def _temporary_repo(_snake_case : Optional[int] ):
        try:
            yield repo_id
        finally:
            cleanup_repo(_snake_case )

    return _temporary_repo
# NOTE(review): same mangling as the fixtures above — shared ``UpperCamelCase_``
# names shadow each other, parameters are all ``_snake_case``, and the bodies
# read the original names (``hf_api``, ``hf_token``, file paths, and the
# ``hf_private_dataset_repo_*`` fixtures) which are unbound here.
@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : Optional[int] , _snake_case : List[Any] ):
    """Create a private dataset repo with one text file; delete it afterwards."""
    __a =F'repo_txt_data-{int(time.time() * 1_0e3 )}'
    __a =F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
    hf_api.upload_file(
        token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data/text_data.txt' , repo_id=_snake_case , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def UpperCamelCase_( _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Optional[int] ):
    """Function-scoped alias of the session-scoped text-data repo fixture."""
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : List[str] , _snake_case : Union[str, Any] ):
    """Create a private dataset repo holding a zipped text archive."""
    __a =F'repo_zipped_txt_data-{int(time.time() * 1_0e3 )}'
    __a =F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
    hf_api.upload_file(
        token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data.zip' , repo_id=_snake_case , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def UpperCamelCase_( _snake_case : str , _snake_case : int , _snake_case : List[str] ):
    """Function-scoped alias of the zipped-text repo fixture."""
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : Dict , _snake_case : Optional[Any] ):
    """Create a private dataset repo holding a zipped image archive."""
    __a =F'repo_zipped_img_data-{int(time.time() * 1_0e3 )}'
    __a =F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
    hf_api.upload_file(
        token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data.zip' , repo_id=_snake_case , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
    """Function-scoped alias of the zipped-image repo fixture."""
    return hf_private_dataset_repo_zipped_img_data_
| 242
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

# (old-prefix, new-prefix) pairs used to rename checkpoint keys for the HF
# VisualBERT layout. NOTE(review): all three module constants below were
# renamed to the same ``UpperCAmelCase`` identifier, so the logger and this
# rename table are overwritten by the checkpoint list — the functions that
# reference ``rename_keys_prefix`` and ``ACCEPTABLE_CHECKPOINTS`` later find
# those names undefined.
UpperCAmelCase : Optional[int] = [
    ('''bert.bert''', '''visual_bert'''),
    ('''bert.cls''', '''cls'''),
    ('''bert.classifier''', '''cls'''),
    ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
    ('''position_embeddings_visual''', '''visual_position_embeddings'''),
    ('''projection''', '''visual_projection'''),
]

# File names of the original VisualBERT checkpoints this script accepts.
UpperCAmelCase : Any = [
    '''nlvr2_coco_pre_trained.th''',
    '''nlvr2_fine_tuned.th''',
    '''nlvr2_pre_trained.th''',
    '''vcr_coco_pre_train.th''',
    '''vcr_fine_tune.th''',
    '''vcr_pre_train.th''',
    '''vqa_coco_pre_trained.th''',
    '''vqa_fine_tuned.th''',
    '''vqa_pre_trained.th''',
]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
    """Load a serialized checkpoint from path *a* onto the CPU and return its
    state dict.

    Fix: the previous body stored the loaded dict in a throwaway local and
    then returned the unbound name ``sd`` (NameError).
    """
    sd = torch.load(a , map_location='cpu' )
    return sd
def _SCREAMING_SNAKE_CASE ( a , a , a=rename_keys_prefix ) -> int:
    """Build a renamed copy of a VisualBERT state dict for the HF layout.

    NOTE(review): all three parameters share the name ``a`` (duplicate
    argument names are a SyntaxError), and the default references
    ``rename_keys_prefix`` which is not defined in this module (the rename
    table above was bound to a different mangled name). Locals are all bound
    to ``__A`` while later lines read ``new_key`` / ``new_d`` — this function
    needs its original parameter and variable names restored.
    """
    # Intended: new_d = OrderedDict(); plus a position_ids buffer derived from
    # config.max_position_embeddings.
    __A : Any = OrderedDict()
    __A : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        __A : List[Any] = key
        # Apply each (old, new) prefix replacement in order.
        for name_pair in rename_keys_prefix:
            __A : Union[str, Any] = new_key.replace(name_pair[0] , name_pair[1] )
        __A : str = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            __A : Union[str, Any] = new_d['cls.predictions.bias']
    return new_d
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( a , a ) -> Optional[Any]:
    """Convert an original VisualBERT ``.th`` checkpoint into an HF model and
    save it to the dump folder.

    NOTE(review): both parameters share the name ``a`` (SyntaxError) and the
    body reads ``checkpoint_path`` / ``config_params`` / ``model_type`` /
    ``config`` / ``state_dict`` / ``model`` / ``pytorch_dump_folder_path``
    while every assignment targets ``__A``; it also calls ``load_state_dict``
    / ``get_new_dict`` / ``ACCEPTABLE_CHECKPOINTS``, which are not defined
    under those names in this module. Original names must be restored.
    """
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        __A : List[Any] = 'pretraining'
        if "vcr" in checkpoint_path:
            __A : List[Any] = {'visual_embedding_dim': 5_12}
        elif "vqa_advanced" in checkpoint_path:
            __A : Optional[Any] = {'visual_embedding_dim': 20_48}
        elif "vqa" in checkpoint_path:
            __A : List[str] = {'visual_embedding_dim': 20_48}
        elif "nlvr" in checkpoint_path:
            __A : Optional[int] = {'visual_embedding_dim': 10_24}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
    else:
        # Fine-tuned checkpoints: pick task head + config from the file name.
        if "vcr" in checkpoint_path:
            __A : Optional[Any] = {'visual_embedding_dim': 5_12}
            __A : int = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            __A : Dict = {'visual_embedding_dim': 20_48}
            __A : int = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            __A : Any = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
            __A : Any = 'vqa'
        elif "nlvr" in checkpoint_path:
            __A : Any = {
                'visual_embedding_dim': 10_24,
                'num_labels': 2,
            }
            __A : List[str] = 'nlvr'

    __A : Dict = VisualBertConfig(**a )

    # Load State Dict
    __A : Tuple = load_state_dict(a )
    __A : Union[str, Any] = get_new_dict(a , a )

    # Instantiate the task-appropriate head and load the converted weights.
    if model_type == "pretraining":
        __A : Optional[int] = VisualBertForPreTraining(a )
    elif model_type == "vqa":
        __A : Any = VisualBertForQuestionAnswering(a )
    elif model_type == "nlvr":
        __A : Tuple = VisualBertForVisualReasoning(a )
    elif model_type == "multichoice":
        __A : Union[str, Any] = VisualBertForMultipleChoice(a )

    model.load_state_dict(a )
    # Save Checkpoints
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
if __name__ == "__main__":
    # Build the CLI and run the conversion. The previous code bound the parser
    # and the parsed args to throwaway names and then referenced the undefined
    # names ``parser`` / ``args`` / ``convert_visual_bert_checkpoint`` (the
    # converter is the last definition bound to ``_SCREAMING_SNAKE_CASE``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    _SCREAMING_SNAKE_CASE(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 77
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _A( nn.Module ):
    """Tiny 3->4->5 MLP with a batch-norm between the linear layers.

    Used by the offload tests below, which expect state-dict keys
    ``linear1.*``, ``batchnorm.*`` and ``linear2.*``.
    """

    def __init__( self ):
        super().__init__()
        # BUG FIX: register the submodules on ``self`` (the original bound
        # them to throwaway locals, leaving the module without parameters)
        # and use the real ``nn.BatchNorm1d`` (``nn.BatchNormad`` does not
        # exist).  Attribute names match the keys asserted by the tests.
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def UpperCAmelCase_ ( self , _A ):
        """Forward pass: linear1 -> batchnorm -> linear2.

        BUG FIX: the original called ``self.lineara`` twice (an attribute
        that was never set); the intended order is linear1 then linear2.
        """
        return self.linear2(self.batchnorm(self.linear1(_A ) ) )
class _A( unittest.TestCase ):
    """Tests for the accelerate offload utilities (``offload_state_dict``,
    ``offload_weight``/``load_offloaded_weight``, ``OffloadedWeightsLoader``
    and ``extract_submodules_state_dict``).

    NOTE(review): obfuscation gave all four test methods the same name
    ``UpperCAmelCase_`` (only the last ``def`` survives on the class) and
    rebinds every local to ``__A`` while later lines still read the original
    names (``model``, ``dtypes``, ``index``, ``weight_map``, ``state_dict``,
    ``ModelForTest``), which are undefined here.  Code kept byte-identical;
    only documentation added.
    """

    def UpperCAmelCase_ ( self ):
        """offload_state_dict writes an index.json plus one .dat per tensor."""
        __A : Dict = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , model.state_dict() )
            __A : str = os.path.join(_A , 'index.json' )
            self.assertTrue(os.path.isfile(_A ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                __A : Optional[int] = os.path.join(_A , F"""{key}.dat""" )
                self.assertTrue(os.path.isfile(_A ) )
            # TODO: add tests on the fact weights are properly loaded

    def UpperCAmelCase_ ( self ):
        """offload_weight / load_offloaded_weight round-trip across dtypes."""
        __A : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            __A : Tuple = torch.randn(2 , 3 , dtype=_A )
            with TemporaryDirectory() as tmp_dir:
                __A : int = offload_weight(_A , 'weight' , _A , {} )
                __A : Union[str, Any] = os.path.join(_A , 'weight.dat' )
                self.assertTrue(os.path.isfile(_A ) )
                # Index entry records shape and the dtype's short name.
                self.assertDictEqual(_A , {'weight': {'shape': [2, 3], 'dtype': str(_A ).split('.' )[1]}} )
                __A : List[str] = load_offloaded_weight(_A , index['weight'] )
                self.assertTrue(torch.equal(_A , _A ) )

    def UpperCAmelCase_ ( self ):
        """OffloadedWeightsLoader merges an in-memory dict with an offload dir."""
        __A : int = ModelForTest()
        __A : Union[str, Any] = model.state_dict()
        # Split the state dict: part kept in memory, part offloaded to disk.
        __A : Optional[Any] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        __A : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            __A : List[str] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )
        # Second split along a different boundary.
        __A : Union[str, Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
        __A : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            __A : Optional[int] = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            # Duplicates are removed
            __A : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )

    def UpperCAmelCase_ ( self ):
        """extract_submodules_state_dict keeps exact-prefix keys only
        ('a.1' matches 'a.1' / 'a.1.*' but not 'a.10' / 'a.10.*')."""
        __A : Dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        __A : str = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
        self.assertDictEqual(_A , {'a.1': 0, 'a.2': 2} )
        __A : Optional[Any] = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        __A : Any = extract_submodules_state_dict(_A , ['a.1', 'a.2'] )
        self.assertDictEqual(_A , {'a.1.a': 0, 'a.2.a': 2} )
| 77
| 1
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( __lowercase ):
    """Test suite for the diffusers ``KDPMaDiscreteScheduler``, built on a
    shared scheduler test base (``check_over_configs``, ``dummy_model``, ...).

    NOTE(review): obfuscation gave every method the same name ``A_`` (only the
    last ``def`` survives on the class), bound both class attributes to the
    same name ``UpperCamelCase_``, and renamed locals to ``_UpperCamelCase``
    while later statements still read the original names (``config``,
    ``scheduler``, ``model``, ``sample``, ``output``, ``result_sum``,
    ``result_mean``), which are undefined here.  Code kept byte-identical;
    only documentation added.
    """

    UpperCamelCase_ : int = (KDPMaDiscreteScheduler,)  # scheduler classes under test
    UpperCamelCase_ : Optional[int] = 10  # rebinds the same class attribute: inference steps

    def A_ ( self , **a ) -> int:
        """Default scheduler config, overridable through keyword arguments."""
        _UpperCamelCase = {
            """num_train_timesteps""": 11_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**a )
        return config

    def A_ ( self ) -> List[str]:
        """Config sweep over several train-timestep counts."""
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=a )

    def A_ ( self ) -> List[Any]:
        """Config sweep over paired beta start/end values."""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=a , beta_end=a )

    def A_ ( self ) -> Any:
        """Config sweep over beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=a )

    def A_ ( self ) -> Optional[int]:
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a )

    def A_ ( self ) -> List[Any]:
        """Full denoising loop with v-prediction; checks summary statistics."""
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
        _UpperCamelCase = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps )
        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _UpperCamelCase = sample.to(a )
        for i, t in enumerate(scheduler.timesteps ):
            _UpperCamelCase = scheduler.scale_model_input(a , a )
            _UpperCamelCase = model(a , a )
            _UpperCamelCase = scheduler.step(a , a , a )
            _UpperCamelCase = output.prev_sample
        _UpperCamelCase = torch.sum(torch.abs(a ) )
        _UpperCamelCase = torch.mean(torch.abs(a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_4286_5017_0972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3

    def A_ ( self ) -> str:
        """Full denoising loop with the default (epsilon) config."""
        if torch_device == "mps":
            return
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps )
        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _UpperCamelCase = sample.to(a )
        for i, t in enumerate(scheduler.timesteps ):
            _UpperCamelCase = scheduler.scale_model_input(a , a )
            _UpperCamelCase = model(a , a )
            _UpperCamelCase = scheduler.step(a , a , a )
            _UpperCamelCase = output.prev_sample
        _UpperCamelCase = torch.sum(torch.abs(a ) )
        _UpperCamelCase = torch.mean(torch.abs(a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3

    def A_ ( self ) -> str:
        """Same loop, but with the timestep schedule placed on the device."""
        if torch_device == "mps":
            return
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps , device=a )
        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            _UpperCamelCase = scheduler.scale_model_input(a , a )
            _UpperCamelCase = model(a , a )
            _UpperCamelCase = scheduler.step(a , a , a )
            _UpperCamelCase = output.prev_sample
        _UpperCamelCase = torch.sum(torch.abs(a ) )
        _UpperCamelCase = torch.mean(torch.abs(a ) )
        if str(a ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 612
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger for this image-processor module.
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowercase ):
    """Image processor: optional resize -> center-crop -> rescale -> normalize.

    NOTE(review): the original block declared every parameter as ``a``
    (duplicate argument names are a SyntaxError) and every method as ``A_``
    (each ``def`` shadowing the previous, so ``self.resize`` etc. were
    unresolvable).  Parameter and method names below are reconstructed from
    how the bodies use them (``self.do_resize``, ``self.resize(image=...,
    size=..., resample=...)``, ...); the main entry point is assumed to be
    ``preprocess`` — confirm against the original source.
    """

    UpperCamelCase_ : str = ["pixel_values"]  # model input name

    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 2_55 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        """Store the default preprocessing configuration (256x256 resize,
        224x224 crop, 1/255 rescale, ImageNet-standard mean/std)."""
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 2_56, "width": 2_56}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs ) -> np.ndarray:
        """Resize ``image`` to ``size`` (dict with ``height``/``width`` keys)."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs ) -> np.ndarray:
        """Center-crop ``image`` to ``size`` (dict with ``height``/``width``)."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ) -> List[str]:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs ) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample=None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a batch.

        Any argument left as ``None`` falls back to the instance default set
        in ``__init__``.  Returns a ``BatchFeature`` holding ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # BUG FIX: was ``if do_resize and size is None or resample is None:``,
        # which parses as ``(do_resize and size is None) or (resample is None)``
        # and therefore raised whenever ``resample`` was None even with
        # resizing disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 612
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
# Module-level logger for the CSV builder.
__lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
# NOTE(review): the four parameter lists below are all bound to the SAME name
# ``__lowerCamelCase`` (each assignment overwrites the previous), while the
# config property later reads ``_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS`` etc.
# — obfuscation appears to have collapsed the original distinct names.
# pandas.read_csv parameters without a usable default value:
__lowerCamelCase : List[str] = ['''names''', '''prefix''']
# parameters deprecated by pandas:
__lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
# parameters introduced in pandas 1.3 and 2.0 respectively:
__lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines''']
__lowerCamelCase : Optional[Any] = ['''date_format''']
@dataclass
class A_ (datasets.BuilderConfig ):
    """BuilderConfig for CSV datasets, mirroring ``pandas.read_csv`` options.

    NOTE(review): obfuscation collapsed every field name to ``a__`` and both
    method names to ``_A``.  Each ``a__ = ...`` line rebinds the same class
    attribute (only the last survives) and, lacking annotations, none of them
    are dataclass fields.  Judging from the kwargs dict in the property below,
    the values correspond in order to the pandas ``read_csv`` parameters
    (sep, delimiter, header, names, column_names, index_col, ...) plus
    ``features`` — to be confirmed against the original source.
    """

    a__ = ''','''
    a__ = None
    a__ = '''infer'''
    a__ = None
    a__ = None
    a__ = None
    a__ = None
    a__ = None
    a__ = True
    a__ = None
    a__ = None
    a__ = None
    a__ = None
    a__ = False
    a__ = None
    a__ = None
    a__ = None
    a__ = True
    a__ = True
    a__ = False
    a__ = True
    a__ = None
    a__ = '''.'''
    a__ = None
    a__ = '''"'''
    a__ = 0
    a__ = None
    a__ = None
    a__ = None
    a__ = None
    a__ = True
    a__ = True
    a__ = 0
    a__ = True
    a__ = False
    a__ = None
    a__ = 10000
    a__ = None
    a__ = '''strict'''
    a__ = '''error'''
    a__ = None

    def _A ( self :List[str] ) -> Any:
        """Post-init hook: mirror the ``delimiter``/``column_names`` aliases.

        NOTE(review): the assignment targets were obfuscated to throwaway
        locals (``snake_case_``); upstream this presumably sets ``self.sep``
        and ``self.names`` — confirm before relying on it.
        """
        if self.delimiter is not None:
            snake_case_ : Tuple = self.delimiter
        if self.column_names is not None:
            snake_case_ : List[Any] = self.column_names

    @property
    def _A ( self :Optional[Any] ) -> int:
        """Build the kwargs dict forwarded to ``pandas.read_csv``, pruned of
        parameters the installed pandas version does not accept.

        NOTE(review): the dict is bound to ``snake_case_`` but the pruning
        loops read ``pd_read_csv_kwargs`` (undefined), and the names
        ``_PANDAS_READ_CSV_*`` / ``CsvConfig`` are likewise undefined in this
        obfuscated module.
        """
        snake_case_ : Optional[int] = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class A_ (datasets.ArrowBasedBuilder ):
    """CSV dataset builder: streams ``pandas.read_csv`` chunks into Arrow tables.

    NOTE(review): obfuscation gave all five methods the same name ``_A`` (only
    the last ``def`` survives on the class) and rebinds locals to
    ``snake_case_`` while later statements read the original names
    (``data_files``, ``files``, ``splits``, ``schema``, ``dtype``,
    ``csv_file_reader``), which are undefined here.  Code kept byte-identical;
    only documentation added.
    """

    a__ = CsvConfig  # builder configuration class

    def _A ( self :Optional[Any] ) -> Optional[Any]:
        """Dataset metadata: expose the configured features."""
        return datasets.DatasetInfo(features=self.config.features )

    def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
        """Resolve ``config.data_files`` into split generators.

        NOTE(review): the download manager parameter was renamed to
        ``lowerCAmelCase__`` yet the body calls ``dl_manager`` directly.
        """
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
        # A bare str/list/tuple means a single unnamed split -> train.
        if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
            snake_case_ : int = data_files
            if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                snake_case_ : List[str] = [files]
            snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        # Otherwise a mapping of split name -> file list.
        snake_case_ : str = []
        for split_name, files in data_files.items():
            if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                snake_case_ : str = [files]
            snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
        return splits

    def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table:
        """Cast an Arrow table to the configured feature schema."""
        if self.config.features is not None:
            snake_case_ : int = self.config.features.arrow_schema
            if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
                # cheaper cast
                snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
        return pa_table

    def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]:
        """Yield ``((file_idx, batch_idx), table)`` pairs from the CSV files."""
        snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        snake_case_ : str = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
            snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(lowerCAmelCase__ ):
                    snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' )
                raise
| 719
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make torch/cuda kernels deterministic so slice-level assertions reproduce.
enable_full_determinism()
class A_ (unittest.TestCase ):
    """Fast (tiny-model) tests for ``AltDiffusionImgaImgPipeline``.

    NOTE(review): obfuscation gave every method the same name ``_A`` (only the
    last ``def`` survives, so the ``@property`` helpers referenced as
    ``self.dummy_cond_unet`` etc. are unresolvable here) and rebinds locals to
    ``snake_case_`` while later lines read the original names (``batch_size``,
    ``image``, ``init_image``, ``alt_pipe``, ``unet``, ``vae``, ``bert``,
    ``pipe``, ``output``, ...), which are undefined.  Code kept
    byte-identical; only documentation added.
    """

    def _A ( self :Any ) -> str:
        """Free GPU memory between tests (upstream: ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _A ( self :List[Any] ) -> List[str]:
        """Deterministic dummy 1x3x32x32 image tensor."""
        snake_case_ : Any = 1
        snake_case_ : Dict = 3
        snake_case_ : Union[str, Any] = (32, 32)
        snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
        return image

    @property
    def _A ( self :Optional[int] ) -> Any:
        """Tiny conditioned UNet for fast tests."""
        torch.manual_seed(0 )
        snake_case_ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def _A ( self :Dict ) -> Any:
        """Tiny KL autoencoder."""
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def _A ( self :Dict ) -> Optional[int]:
        """Tiny Roberta-series text encoder."""
        torch.manual_seed(0 )
        snake_case_ : str = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
        return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )

    @property
    def _A ( self :Any ) -> str:
        """Stub feature extractor returning an empty pixel_values holder."""
        def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
            class A_ :
                """Minimal stand-in for a feature-extractor output object."""
                def __init__( self :Optional[int] ) -> List[str]:
                    snake_case_ : str = torch.ones([0] )
                def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
                    self.pixel_values.to(lowerCAmelCase__ )
                    return self
            return Out()
        return extract

    def _A ( self :int ) -> Dict:
        """End-to-end img2img on CPU; output and tuple output must agree."""
        snake_case_ : str = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : str = self.dummy_cond_unet
        snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : Dict = self.dummy_vae
        snake_case_ : Dict = self.dummy_text_encoder
        snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : str = 77
        snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
        snake_case_ : Tuple = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Dict = "A painting of a squirrel eating a burger"
        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Dict = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
        snake_case_ : Any = output.images
        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Optional[Any] = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
        snake_case_ : Tuple = image[0, -3:, -3:, -1]
        snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :int ) -> List[str]:
        """Same pipeline in fp16; only checks the output shape."""
        snake_case_ : Union[str, Any] = self.dummy_cond_unet
        snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : int = self.dummy_vae
        snake_case_ : List[Any] = self.dummy_text_encoder
        snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : int = 77
        snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
        # put models in fp16
        snake_case_ : Optional[Any] = unet.half()
        snake_case_ : Tuple = vae.half()
        snake_case_ : List[str] = bert.half()
        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
        snake_case_ : str = torch.manual_seed(0 )
        snake_case_ : Any = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :Optional[int] ) -> Any:
        """Integration img2img with a non-multiple-of-32 resolution."""
        snake_case_ : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case_ : str = init_image.resize((760, 504) )
        snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
        snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
        snake_case_ : int = torch.manual_seed(0 )
        snake_case_ : List[str] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : str = output.images[0]
        snake_case_ : List[Any] = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
    """Slow GPU integration test for ``AltDiffusionImgaImgPipeline``.

    NOTE(review): both methods are obfuscated to the same name ``_A`` (the
    second shadows the first, which was presumably ``tearDown``), and locals
    bound to ``snake_case_`` are read back under undefined original names
    (``init_image``, ``pipe``, ``output``, ``image``, ``expected_image``).
    Code kept byte-identical; only documentation added.
    """

    def _A ( self :Optional[Any] ) -> Optional[int]:
        """Free GPU memory after each test (upstream: ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self :str ) -> Any:
        """img2img against a stored reference image (MAE tolerance)."""
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        snake_case_ : List[Any] = init_image.resize((768, 512) )
        snake_case_ : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        snake_case_ : Any = "BAAI/AltDiffusion"
        snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
        snake_case_ : Tuple = torch.manual_seed(0 )
        snake_case_ : List[Any] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : Optional[int] = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 656
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the Conditional DETR package: maps submodule name
# -> list of public symbols.  BUG FIX: the original rebound ``a`` to plain
# lists (losing the configuration entries) and finally passed the undefined
# name ``_import_structure`` to ``_LazyModule`` — a NameError at import time —
# without installing the proxy into ``sys.modules``.
a = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}

# Vision-only symbols are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    a['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']

# Torch-only modeling symbols likewise.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a['modeling_conditional_detr'] = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a, module_spec=__spec__)
| 350
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuda kernels deterministic so slice-level assertions reproduce.
enable_full_determinism()
class _A(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for LDMTextToImagePipeline built from tiny, seeded components.

    NOTE(review): the obfuscated original reused a single name for every class
    attribute, local variable and method; names below are restored to what the
    surrounding code (self.get_dummy_components / self.get_dummy_inputs calls,
    PipelineTesterMixin) actually references.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the original attribute name for this flag was lost in the
    # obfuscation — assumed to be a mixin feature toggle; TODO confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Return the dict of tiny seeded components the pipeline is built from."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # NOTE(review): the original passed an undefined name for both flags;
            # False matches the upstream diffusers test values — confirm.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on *device*."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        """End-to-end text-to-image inference on CPU against a pinned slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class _A(unittest.TestCase):
    """Slow GPU tests for the pretrained CompVis/ldm-text2im-large-256 checkpoint.

    NOTE(review): local/method names restored from the obfuscated original
    (which assigned every value to one reused name but read the real names).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with fixed latents.

        NOTE(review): the original default was the nonexistent `torch.floataa`;
        torch.float32 is the standard default here — confirm.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        # NOTE(review): torch_device is expected to come from the test-utils
        # import at the top of the file — confirm.
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class _A(unittest.TestCase):
    """Nightly GPU tests comparing full 50-step output against a reference array.

    NOTE(review): local/method names restored from the obfuscated original.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with fixed latents (50 inference steps)."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        # NOTE(review): torch_device presumably comes from the test-utils import
        # at the top of the file — confirm.
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 269
| 0
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
# NOTE(review): the obfuscated original bound all three doc constants to the
# same name; the decorator and _info() below reference _CITATION, _DESCRIPTION
# and _KWARGS_DESCRIPTION, so those names are restored here.
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    """HF `datasets` metric wrapper around the official `mauve-text` implementation.

    NOTE(review): the obfuscated original gave both methods the same name and
    every `_compute` parameter the same (duplicate) name, which is a
    SyntaxError; the `datasets.Metric` hook names and the upstream keyword
    names of `compute_mauve` are restored here (defaults kept in order).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="""https://github.com/krishnap25/mauve""",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            codebase_urls=["""https://github.com/krishnap25/mauve"""],
            reference_urls=[
                """https://arxiv.org/abs/2102.01454""",
                """https://github.com/krishnap25/mauve""",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between `predictions` and `references`; returns the
        result object produced by `compute_mauve`."""
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 545
|
"""simple docstring"""
def lowercase_(_snake_case):
    """Return A(d): the length of the least repunit (1, 11, 111, ...) divisible
    by *_snake_case*, or 0 if no repunit is divisible by it.

    NOTE(review): the original body read an undefined name `divisor`; it is now
    bound from the parameter.
    """
    divisor = _snake_case
    if divisor % 5 == 0 or divisor % 2 == 0:
        # Repunits are odd and never multiples of 5, so nothing to find.
        return 0
    repunit = 1  # current repunit value modulo divisor
    repunit_index = 1  # number of 1-digits in the current repunit
    while repunit:
        # Appending a digit: R(k+1) = 10*R(k) + 1, tracked modulo divisor.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def lowercase_(_snake_case=1_000_000):
    """Project Euler 129: least odd d (coprime to 10) with A(d) > *_snake_case*.

    NOTE(review): the original read undefined names (`limit`, `divisor`) and
    called a nonexistent `least_divisible_repunit`; the repunit-length routine
    is inlined as a private helper so this function stands alone.
    """

    def _least_divisible_repunit(divisor):
        # A(divisor): length of least repunit divisible by divisor (0 if none).
        if divisor % 5 == 0 or divisor % 2 == 0:
            return 0
        repunit = 1
        repunit_index = 1
        while repunit:
            repunit = (10 * repunit + 1) % divisor
            repunit_index += 1
        return repunit_index

    limit = _snake_case
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1  # A(d) > limit requires d odd; step over even candidates
    while _least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
    # NOTE(review): the original printed an undefined `solution()`; the solver
    # defined above is `lowercase_`.
    print(f"""{lowercase_() = }""")
| 545
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule name -> list of public names it exports.
# NOTE(review): the obfuscated original reassigned the single name `A` for every
# optional sub-list (losing all but the last) and then passed an undefined
# `_import_structure` to _LazyModule; restored to the standard transformers
# lazy-module pattern, keeping `A` as the structure dict.
A = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A, module_spec=__spec__)
| 475
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A(OwlViTImageProcessor):
    """Deprecated alias for OwlViTImageProcessor, kept for backward compatibility.

    NOTE(review): the original base `__lowercase` was undefined; the import at
    the top of the file shows the intended base is OwlViTImageProcessor. The
    duplicate `*args`/`**kwargs` placeholder names (a SyntaxError) and the
    warning category (the args tuple instead of FutureWarning) are also fixed.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 26
| 0
|
"""simple docstring"""
import argparse
UpperCamelCase_ = """docs/source/_static/js/custom.js"""
def UpperCamelCase(UpperCAmelCase, _path=None) -> None:
    """Point the docs' custom.js at release version *UpperCAmelCase*.

    Rewrites the `const stableVersion = ...` line and appends the new version
    to the `const versionMapping = { ... }` dictionary.

    _path: file to edit; defaults to the module-level `UpperCamelCase_` JS path
    (looked up at call time). Added with a default for testability, so the
    call signature stays backward compatible.

    NOTE(review): the original opened the version argument as a file and read
    an undefined `version`; it also assigned the rewritten line to a throwaway
    local instead of `lines[index]`.
    """
    version = UpperCAmelCase
    path = UpperCamelCase_ if _path is None else _path
    with open(path, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end (just before the closing brace)
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(path, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    # NOTE(review): the original clobbered the module path constant and then
    # referenced undefined names (`parser`, `update_custom_js`); use local
    # names and call the updater defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    UpperCamelCase(args.version)
| 701
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class snake_case(PretrainedConfig):
    """Configuration for Switch Transformers models (stores model hyper-parameters).

    NOTE(review): the obfuscated original used one duplicated parameter name
    for the entire __init__ signature (a SyntaxError) and assigned every value
    to a throwaway local instead of `self`; the upstream
    SwitchTransformersConfig parameter/attribute names are restored (defaults
    kept in their original order). The base class comes from the
    `PretrainedConfig` import at the top of the file.
    """

    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 0
|
"""simple docstring"""
from typing import Any
class UpperCAmelCase_:
    """Singly-linked-list node holding one payload and a link to the next node."""

    def __init__(self, A):
        # NOTE(review): the original assigned to throwaway locals (and read an
        # undefined `data`) instead of setting instance attributes.
        self.data = A
        self.next = None  # None marks the tail
class _Node:
    """Node helper for the linked list below.

    NOTE(review): the original `push` referenced an undefined `Node` (the name
    was lost in obfuscation); a private node class is defined here so the list
    is self-contained.
    """

    def __init__(self, data):
        self.data = data
        self.next = None


class UpperCAmelCase_:
    """Minimal singly linked list: push to head, print, and swap two nodes' data.

    NOTE(review): the obfuscated original gave all three methods the same name
    (only the last survived) and never set instance attributes; the method
    names used by the __main__ driver below (push / print_list / swap_nodes)
    are restored.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print the list's payloads separated by spaces, then a newline."""
        node = self.head
        while node is not None:
            print(node.data, end=" ")
            node = node.next
        print()

    def push(self, A):
        """Insert a new node carrying *A* at the head of the list."""
        node = _Node(A)
        node.next = self.head
        self.head = node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding the two given values.

        No-op if the values are equal or either value is absent.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # NOTE(review): the original constructed an undefined `LinkedList`; the
    # list class defined above is `UpperCAmelCase_`.
    ll = UpperCAmelCase_()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 289
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for RoBERTa models (stores model hyper-parameters).

    NOTE(review): the obfuscated original used one duplicated parameter name
    for the whole __init__ signature (a SyntaxError) and assigned values to
    throwaway locals instead of `self`; the upstream RobertaConfig
    parameter/attribute names are restored (defaults kept in their original
    order). The base class comes from the `PretrainedConfig` import at the
    top of the file.
    """

    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for RoBERTa-style models.

    NOTE(review): the base class comes from the `OnnxConfig` import at the top
    of the file (the obfuscated base name was undefined), and upstream exposes
    this hook as the `inputs` property, which the base class requires.
    """

    @property
    def inputs(self):
        """Map each model input to its dynamic ONNX axes (task-dependent)."""
        # NOTE(review): the original assigned the axes dict to a throwaway
        # local and then read an undefined `dynamic_axis`.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 289
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
# NOTE(review): the original inserted an undefined `git_repo_path`; use the
# path computed above.
sys.path.insert(1, _lowerCAmelCase)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """Pytest hook: register transformers' shared command-line options.

    NOTE(review): the obfuscated original defined both conftest hooks under the
    same name (the second shadowed the first); pytest invokes hooks by name and
    passes arguments by keyword, so the standard hook name and `parser`
    parameter are restored.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: emit transformers' optional test reports after the run.

    NOTE(review): the original read an undefined `terminalreporter` and passed
    the reporter object as the report id; the id should be the value of the
    `--make-reports` option.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 708
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowerCAmelCase(coefficient_matrix, constant_matrix, init_val, iterations):
    """Solve A·x = b approximately with the Jacobi iteration method.

    coefficient_matrix: n x n array A (must be strictly diagonally dominant).
    constant_matrix:    n x 1 array b.
    init_val:           length-n sequence of initial guesses.
    iterations:         number of Jacobi sweeps (must be >= 1).
    Returns the final iterate as a list of floats.
    Raises ValueError on any dimension mismatch or non-dominant matrix.

    NOTE(review): the obfuscated original duplicated every parameter name (a
    SyntaxError), called an undefined dominance check, and converted the wrong
    name in the final list; all restored/fixed below.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")

    # Augmented matrix [A | b]; annotate with the concrete numpy float dtype.
    table: NDArray[np.float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    _strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times.
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]  # diagonal coefficient
                elif col == cols - 1:
                    val = table[row][col]  # constant term b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]


def _strictly_diagonally_dominant(table):
    """Raise ValueError unless every diagonal entry of the augmented table
    exceeds the (signed) sum of the other coefficients in its row."""
    rows, cols = table.shape
    for i in range(rows):
        total = 0
        for j in range(cols - 1):  # last column holds the constants, skip it
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return True
def _lowerCAmelCase(_lowerCAmelCase):
    """Check strict diagonal dominance of the augmented table *_lowerCAmelCase*.

    Returns True when each diagonal entry exceeds the (signed) sum of the other
    coefficients in its row; otherwise raises ValueError. The last column is
    treated as the constants column and excluded from the sums.

    NOTE(review): the original body read an undefined `table`; it is now bound
    from the parameter.
    """
    table = _lowerCAmelCase
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return is_diagonally_dominant
# Test Cases
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 481
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase(unittest.TestCase):
    """Holds the hyper-parameters used to exercise BeitImageProcessor in the
    tests below, and builds the kwargs dict for the image processor.

    NOTE(review): the obfuscated original duplicated one parameter name for the
    whole __init__ signature (a SyntaxError) and assigned values to throwaway
    locals instead of `self`; parameter names are reconstructed from the
    attributes read by `prepare_image_processor_dict` (whose name is grounded
    by the call site in the test class below).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # list defaults are read-only here
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate BeitImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def __a():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    NOTE(review): the original assigned the dataset to a throwaway name and
    then read an undefined `dataset`; consistent local names are restored (and
    the misleading `-> int` annotation dropped — the function returns a tuple).
    """
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image = Image.open(dataset[0]['file'])
    seg_map = Image.open(dataset[1]['file'])  # avoid shadowing builtin `map`
    return image, seg_map
def __a():
    """Load two (image, map) pairs from the ADE20k test fixtures as batch inputs.

    NOTE(review): the original assigned every value to a throwaway name and
    then read undefined locals (`ds`, `imagea`, `mapa`); numbered local names
    are restored.
    """
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class lowercase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BeitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case , 'center_crop' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , snake_case )
SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=snake_case )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , snake_case )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self : Union[str, Any] ):
    """Check that the image processor accepts a list of random PyTorch tensors,
    one-at-a-time and batched, and emits pixel tensors of the configured crop size.

    NOTE(review): assignments target ``SCREAMING_SNAKE_CASE`` while later lines
    read ``image_processing`` / ``image_inputs`` / ``encoded_images``, and the
    ``snake_case`` arguments are undefined — automated-rename artifact; verify
    against the upstream test.
    """
    SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
    for image in image_inputs:
        self.assertIsInstance(snake_case , torch.Tensor )
    # Test not batched input: a single image must come back with batch dim 1
    SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    # Test batched: leading dim equals the tester's configured batch size
    SCREAMING_SNAKE_CASE : int = image_processing(snake_case , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
def lowerCamelCase_ ( self : Optional[Any] ):
    """Exercise the image processor with segmentation maps: tensor inputs
    (single and batched) and PIL inputs (single and batched). In each case the
    pixel values must match the crop size and the label maps must be int64
    values in the 8-bit range.

    NOTE(review): assignments target ``SCREAMING_SNAKE_CASE`` while later lines
    read ``image_processing`` / ``image_inputs`` / ``maps`` / ``encoding``, and
    ``snake_case`` is undefined — automated-rename artifact; verify against the
    upstream test.
    """
    SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
    SCREAMING_SNAKE_CASE : str = []
    for image in image_inputs:
        self.assertIsInstance(snake_case , torch.Tensor )
        # one all-zero int64 map per image, matching its spatial size
        maps.append(torch.zeros(image.shape[-2:] ).long() )
    # Test not batched input
    SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
    self.assertEqual(
        encoding['pixel_values'].shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(
        encoding['labels'].shape , (
            1,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    # labels must be int64 class ids within the 8-bit range
    self.assertEqual(encoding['labels'].dtype , torch.long )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
    # Test batched
    SCREAMING_SNAKE_CASE : List[Any] = image_processing(snake_case , snake_case , return_tensors='pt' )
    self.assertEqual(
        encoding['pixel_values'].shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(
        encoding['labels'].shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(encoding['labels'].dtype , torch.long )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
    # Test not batched input (PIL images)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = prepare_semantic_single_inputs()
    SCREAMING_SNAKE_CASE : List[Any] = image_processing(snake_case , snake_case , return_tensors='pt' )
    self.assertEqual(
        encoding['pixel_values'].shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(
        encoding['labels'].shape , (
            1,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(encoding['labels'].dtype , torch.long )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
    # Test batched input (PIL images)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = prepare_semantic_batch_inputs()
    SCREAMING_SNAKE_CASE : List[Any] = image_processing(snake_case , snake_case , return_tensors='pt' )
    self.assertEqual(
        encoding['pixel_values'].shape , (
            2,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(
        encoding['labels'].shape , (
            2,
            self.image_processor_tester.crop_size['height'],
            self.image_processor_tester.crop_size['width'],
        ) , )
    self.assertEqual(encoding['labels'].dtype , torch.long )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
def lowerCamelCase_ ( self : Any ):
    """Check label handling with and without label reduction: by default
    (ADE20k-style data) labels stay within [0, 150]; after toggling the flag
    the observed labels may span the full 8-bit range.

    NOTE(review): the ``= True`` assignment presumably toggled
    ``image_processing.do_reduce_labels`` before the rename collapsed its
    target to ``SCREAMING_SNAKE_CASE``; ``snake_case`` is undefined — verify
    against the upstream test.
    """
    SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
    # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = prepare_semantic_single_inputs()
    SCREAMING_SNAKE_CASE : List[str] = image_processing(snake_case , snake_case , return_tensors='pt' )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 150 )
    SCREAMING_SNAKE_CASE : Tuple = True
    SCREAMING_SNAKE_CASE : List[str] = image_processing(snake_case , snake_case , return_tensors='pt' )
    self.assertTrue(encoding['labels'].min().item() >= 0 )
    self.assertTrue(encoding['labels'].max().item() <= 255 )
| 352
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
    """Intentionally empty test case placeholder (fast-test variant is skipped)."""
    pass
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase):
    """Nightly GPU integration tests for ``VersatileDiffusionPipeline``
    (dual-guided, text-to-image, and image-variation modes).

    NOTE(review): throughout this class assignments target
    ``SCREAMING_SNAKE_CASE`` while later lines read ``pipe`` / ``image`` /
    ``generator`` / ``new_image`` / ``prompt`` etc., and ``snake_case`` is
    undefined — automated-rename artifact; compare with the upstream diffusers
    test before trusting any behavioral claim below.
    """

    def lowerCamelCase_ ( self : List[Any] ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self : List[str] ):
        """Round-trip the pipeline through save_pretrained/from_pretrained and
        assert dual-guided outputs are identical (tolerance 1e-5)."""
        SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        SCREAMING_SNAKE_CASE : Dict = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
            prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(snake_case )
            SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # re-seed so the reloaded pipeline sees the identical noise sequence
        SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
            prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def lowerCamelCase_ ( self : Tuple ):
        """Run all three inference modes and compare a 3x3 corner slice of each
        512x512 output against hard-coded reference values (tolerance 1e-1)."""
        SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        SCREAMING_SNAKE_CASE : Union[str, Any] = 'cyberpunk 2077'
        SCREAMING_SNAKE_CASE : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
        # dual-guided mode
        SCREAMING_SNAKE_CASE : Any = pipe.dual_guided(
            prompt=snake_case , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        SCREAMING_SNAKE_CASE : List[Any] = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        # text-to-image mode
        SCREAMING_SNAKE_CASE : Any = 'A painting of a squirrel eating a burger '
        SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe.text_to_image(
            prompt=snake_case , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        SCREAMING_SNAKE_CASE : Dict = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        # image-variation mode
        SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(snake_case , generator=snake_case , output_type='numpy' ).images
        SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 352
| 1
|
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_snake_case : str = get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
    """Verification-mode selector: how thoroughly downloaded data is checked.

    NOTE(review): all three members share the name ``UpperCamelCase`` — the
    Enum metaclass rejects reused keys at class creation, so this cannot
    execute as written; the values suggest the original members were
    ALL_CHECKS / BASIC_CHECKS / NO_CHECKS.
    """
    UpperCamelCase = '''all_checks'''
    UpperCamelCase = '''basic_checks'''
    UpperCamelCase = '''no_checks'''
# NOTE(review): four distinct checksum-verification exception types were
# collapsed to one name by an automated rename (each definition shadows the
# previous); the raise sites below suggest they were ExpectedMoreDownloadedFiles,
# UnexpectedDownloadedFile, NonMatchingChecksumError, and a checksum base error.
# The base class ``lowercase_`` is not defined in this file.
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
def A__ ( expected_checksums , recorded_checksums , verification_name=None ):
    """Verify that recorded download checksums match the expected ones.

    Args:
        expected_checksums: mapping ``url -> checksum`` from the dataset metadata,
            or ``None`` to skip verification entirely.
        recorded_checksums: mapping ``url -> checksum`` computed for the files
            actually downloaded.
        verification_name: optional label inserted into the error message.

    Raises:
        ExpectedMoreDownloadedFiles: an expected URL was never downloaded.
        UnexpectedDownloadedFile: a downloaded URL was not expected.
        NonMatchingChecksumError: a shared URL has differing checksums.

    Note: the original signature repeated one mangled parameter name three
    times (a SyntaxError); the names above are restored from the body's usage.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    # set differences detect missing / extra files before comparing values
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
# NOTE(review): four distinct split-verification exception types were collapsed
# to one name by an automated rename (each definition shadows the previous);
# the raise sites below suggest ExpectedMoreSplits, UnexpectedSplits, and
# NonMatchingSplitsSizesError. The base class ``lowercase_`` is not defined here.
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
class _UpperCAmelCase ( lowercase_ ):
    pass
def A__ ( expected_splits , recorded_splits ):
    """Verify that recorded dataset splits match the expected ones by name and size.

    Args:
        expected_splits: mapping ``split_name -> split_info`` (with a
            ``num_examples`` attribute), or ``None`` to skip verification.
        recorded_splits: mapping of the splits actually produced.

    Raises:
        ExpectedMoreSplits: an expected split is missing.
        UnexpectedSplits: an unexpected split was produced.
        NonMatchingSplitsSizesError: a shared split has a differing example count.

    Note: the original signature repeated one mangled parameter name twice
    (a SyntaxError); the names above are restored from the body's usage.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def A__ ( path , record_checksum = True ):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at *path*.

    Args:
        path: filesystem path of the downloaded file.
        record_checksum: when True, compute the file's SHA-256 hex digest;
            otherwise the checksum is ``None`` (cheaper for huge files).

    Note: the original signature repeated one mangled parameter name (a
    SyntaxError) and referenced undefined locals; names restored from usage.
    """
    if record_checksum:
        # the module-level `shaaaa` import is garbled; sha256 is the hash
        # the hex digest format below corresponds to
        from hashlib import sha256
        m = sha256()
        with open(path , "rb" ) as f:
            # stream in 1 MiB chunks so arbitrarily large files fit in memory
            for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def A__ ( dataset_size ):
    """Return True when *dataset_size* (bytes) fits under the configured
    in-memory cap ``config.IN_MEMORY_MAX_SIZE``.

    Falsy sizes (``None``/``0``) or a disabled cap (``0``/``None``) yield False.

    Note: the original parameter was mangled while the body read
    ``dataset_size`` (a NameError); the name is restored from the body.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 524
|
"""simple docstring"""
def A__ ( string ):
    """Return True iff *string* is an isogram (no letter occurs twice,
    case-insensitively). The empty string counts as an isogram.

    Raises:
        ValueError: if any character is not alphabetic.

    Note: the original parameter was mangled while the body read ``string``
    (a NameError); the name is restored from the body.
    """
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    # compare distinct letters vs total letters, case-folded
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
_snake_case : Optional[Any] = input('Enter a string ').strip()
_snake_case : Any = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 524
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
def lowercase_ ( _UpperCamelCase ):
    '''Load a fairseq/metaseq OPT checkpoint from *_UpperCamelCase* (a file
    path) and return a state dict massaged into the HF OPT layout:

    - drops ``decoder.version`` and ``decoder.output_projection.weight``;
    - renames projection / final-layer-norm keys;
    - splits every fused ``.qkv_proj.`` weight into separate Q, K, V tensors.

    Note: the original body assigned every local to one mangled name (NameError
    on use); locals are restored from the read sites.
    '''
    sd = torch.load(_UpperCamelCase , map_location='cpu' )
    if "model" in sd.keys():
        sd = torch.load(_UpperCamelCase , map_location='cpu' )['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.' , '.q_proj.' )
            k_name = key.replace('.qkv_proj.' , '.k_proj.' )
            v_name = key.replace('.qkv_proj.' , '.v_proj.' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
    '''Convert a fairseq OPT checkpoint to a HF OPTModel and save it.

    NOTE(review): this cannot execute as written — the signature repeats one
    mangled parameter name three times (a SyntaxError), it calls
    ``load_checkpoint`` which does not exist under that name in this file
    (the loader above is also named ``lowercase_`` and is shadowed by this
    definition), and locals assigned to ``__lowercase`` are later read as
    ``config`` / ``model`` / ``state_dict``. Compare with the upstream
    convert_opt_original_pytorch_checkpoint_to_pytorch script.
    '''
    __lowercase = load_checkpoint(_UpperCamelCase )
    if config is not None:
        __lowercase = OPTConfig.from_pretrained(_UpperCamelCase )
    else:
        __lowercase = OPTConfig()
    # half-precision eval model, then load the converted state dict
    __lowercase = OPTModel(_UpperCamelCase ).half().eval()
    model.load_state_dict(_UpperCamelCase )
    # Check results
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
a : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 639
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Script body: build a deliberately tiny FSMT en-ru model (tiny vocab + tiny
# dims, ~60KB total) for use in fast tests, then smoke-test and save it.
# NOTE(review): every module-level assignment targets the single name `a`
# while later lines read `vocab` / `merges` / `build_dir` / `tokenizer` /
# `config` / `tiny_model` / `batch` / `outputs` — an automated-rename
# artifact; this script cannot run as written. Restore the names the read
# sites expect before using it.
a : Union[str, Any] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
a : Tuple = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
# token -> id mapping and a minimal BPE merges table
a : List[Any] = dict(zip(vocab, range(len(vocab))))
a : Optional[int] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    # write vocab/merges files where the tokenizer constructor expects them
    a : Union[str, Any] = Path(tmpdirname)
    a : Dict = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    a : Optional[int] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    a : Tuple = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))
    a : Optional[int] = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
# minimal model dims: 1 layer, 1 head, d_model=4
a : List[str] = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
a : Union[str, Any] = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
a : Dict = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
a : Union[str, Any] = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 639
| 1
|
"""simple docstring"""
from manim import *
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Manim scene animating big-model inference with layer offloading: model
    weights hop between CPU and GPU (and back) as an input token traverses the
    layers.

    NOTE(review): assignments target ``__lowerCAmelCase`` while later lines
    read the real names (``cpu`` / ``gpu`` / ``model`` / ``input`` / ...), and
    ``__a`` appears as an undefined argument throughout — automated-rename
    artifact; this scene cannot render as written. The base class
    ``lowerCAmelCase__`` is presumably manim's ``Scene``; confirm upstream.
    '''
    def snake_case ( self ):
        """Construct the CPU/GPU/Model/Disk diagram, then animate the offloaded
        forward pass layer by layer."""
        # basic building-block rectangles for memory cells
        __lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
        __lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
        __lowerCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
        # CPU block: two columns of six cells
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("CPU" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__a )
        # GPU block: four cells
        __lowerCAmelCase = [mem.copy() for i in range(4 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("GPU" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        gpu.move_to([-1, -1, 0] )
        self.add(__a )
        # Model block: six cells representing layers
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("Model" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        model.move_to([3, -1.0, 0] )
        self.add(__a )
        # filled overlays: model layers plus their CPU-resident copies
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        for i, rect in enumerate(__a ):
            __lowerCAmelCase = fill.copy().set_fill(__a , opacity=0.8 )
            target.move_to(__a )
            model_arr.append(__a )
            __lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__a )
        self.add(*__a , *__a )
        # Disk block mirrors the CPU layout
        __lowerCAmelCase = [meta_mem.copy() for i in range(6 )]
        __lowerCAmelCase = [meta_mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("Disk" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        disk.move_to([-4, -1.2_5, 0] )
        self.add(__a , __a )
        # legend
        __lowerCAmelCase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowerCAmelCase = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__a , __a )
        __lowerCAmelCase = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
        blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__a )
        # narration + the moving input token
        __lowerCAmelCase = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a ) )
        __lowerCAmelCase = Square(0.3 )
        input.set_fill(__a , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __a , buff=0.5 )
        self.play(Write(__a ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__a , buff=0.0_2 )
        self.play(MoveToTarget(__a ) )
        self.play(FadeOut(__a ) )
        # first layer: weights hop CPU -> GPU as the hook fires
        __lowerCAmelCase = Arrow(start=__a , end=__a , color=__a , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __a , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        __lowerCAmelCase = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a , run_time=3 ) )
        __lowerCAmelCase = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
        self.play(
            Write(__a ) , Circumscribe(model_arr[0] , color=__a , **__a ) , Circumscribe(model_cpu_arr[0] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        # remaining layers: slide the token, swap current layer back to CPU,
        # next layer's weights up to GPU
        __lowerCAmelCase = a.copy()
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.0_2 , __a , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.0_2 )
            __lowerCAmelCase = AnimationGroup(
                FadeOut(__a , run_time=0.5 ) , MoveToTarget(__a , run_time=0.5 ) , FadeIn(__a , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__a )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    __lowerCAmelCase = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__a ) , Circumscribe(cpu_left_col_base[i] , **__a ) , Circumscribe(cpu_left_col_base[i + 1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , Circumscribe(model_arr[i + 1] , color=__a , **__a ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # last layer: return weights to CPU, move token past the model
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__a , **__a ) , Circumscribe(cpu_left_col_base[-1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        __lowerCAmelCase = a_c
        __lowerCAmelCase = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
        self.play(
            FadeOut(__a ) , FadeOut(__a , run_time=0.5 ) , )
        # closing caption
        __lowerCAmelCase = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a , run_time=3 ) , MoveToTarget(__a ) )
        self.wait()
| 706
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCamelCase ( tf.keras.layers.Layer ):
    '''Adaptive softmax Keras layer (Transformer-XL style): the vocabulary is
    split at ``cutoffs`` into a head cluster plus tail clusters; with
    ``div_val > 1`` each tail cluster uses a smaller embedding dimension.

    NOTE(review): several signatures below repeat the mangled parameter name
    ``__a`` (a SyntaxError) and assignments target ``__lowerCAmelCase`` while
    later lines read the real names — automated-rename artifact; this class
    cannot execute as written. Compare with transformers'
    ``TFAdaptiveSoftmaxMask``.
    '''
    def __init__( self , __a , __a , __a , __a , __a=1 , __a=False , **__a ):
        """Store cluster layout; positional args are presumably
        (vocab_size, d_embed, d_proj, cutoffs), then div_val and keep_order."""
        super().__init__(**__a )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = d_embed
        __lowerCAmelCase = d_proj
        # append the full vocab size so cutoff_ends brackets every cluster
        __lowerCAmelCase = cutoffs + [vocab_size]
        __lowerCAmelCase = [0] + self.cutoffs
        __lowerCAmelCase = div_val
        __lowerCAmelCase = self.cutoffs[0]
        __lowerCAmelCase = len(self.cutoffs ) - 1
        __lowerCAmelCase = self.shortlist_size + self.n_clusters
        __lowerCAmelCase = keep_order
        __lowerCAmelCase = []
        __lowerCAmelCase = []
    def snake_case ( self , __a ):
        """Keras build(): create cluster weights plus per-cluster output
        projections and output layers (shared dim when div_val == 1, shrinking
        dims otherwise)."""
        if self.n_clusters > 0:
            __lowerCAmelCase = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__a , name="cluster_weight" )
            __lowerCAmelCase = self.add_weight(
                shape=(self.n_clusters,) , initializer="zeros" , trainable=__a , name="cluster_bias" )
        if self.div_val == 1:
            # one shared embedding dim; optional projection when d_proj differs
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    __lowerCAmelCase = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" , )
                    self.out_projs.append(__a )
                else:
                    self.out_projs.append(__a )
                __lowerCAmelCase = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
                __lowerCAmelCase = self.add_weight(
                    shape=(self.vocab_size,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
                self.out_layers.append((weight, bias) )
        else:
            # per-cluster embedding dim shrinks by div_val**i
            for i in range(len(self.cutoffs ) ):
                __lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                __lowerCAmelCase = self.d_embed // (self.div_val**i)
                __lowerCAmelCase = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" )
                self.out_projs.append(__a )
                __lowerCAmelCase = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
                __lowerCAmelCase = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
                self.out_layers.append((weight, bias) )
        super().build(__a )
    @staticmethod
    def snake_case ( __a , __a , __a , __a=None ):
        """Compute logits: optionally project hidden states, then apply the
        output layer (weight, bias)."""
        __lowerCAmelCase = x
        if proj is not None:
            __lowerCAmelCase = tf.einsum("ibd,ed->ibe" , __a , __a )
        return tf.einsum("ibd,nd->ibn" , __a , __a ) + b
    @staticmethod
    def snake_case ( __a , __a ):
        """Gather, for each position, the log-probability of its target id."""
        __lowerCAmelCase = shape_list(__a )
        __lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype )
        __lowerCAmelCase = tf.stack([r, target] , 1 )
        return tf.gather_nd(__a , __a )
    def snake_case ( self , __a , __a , __a=True , __a=False ):
        """Forward pass: return log-probabilities over the full vocabulary;
        when targets are given also accumulate the NLL loss via add_loss."""
        __lowerCAmelCase = 0
        if self.n_clusters == 0:
            # no clusters: plain softmax over the whole vocabulary
            __lowerCAmelCase = self._logit(__a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                __lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__a , logits=__a )
            __lowerCAmelCase = tf.nn.log_softmax(__a , axis=-1 )
        else:
            __lowerCAmelCase = shape_list(__a )
            __lowerCAmelCase = []
            __lowerCAmelCase = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                __lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # mask positions whose target falls in this cluster
                    __lowerCAmelCase = (target >= l_idx) & (target < r_idx)
                    __lowerCAmelCase = tf.where(__a )
                    __lowerCAmelCase = tf.boolean_mask(__a , __a ) - l_idx
                if self.div_val == 1:
                    __lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx]
                    __lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx]
                else:
                    __lowerCAmelCase = self.out_layers[i][0]
                    __lowerCAmelCase = self.out_layers[i][1]
                if i == 0:
                    # head cluster: shortlist tokens plus one logit per tail cluster
                    __lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
                    __lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
                    __lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[0] )
                    __lowerCAmelCase = tf.nn.log_softmax(__a )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        __lowerCAmelCase = tf.boolean_mask(__a , __a )
                        __lowerCAmelCase = self._gather_logprob(__a , __a )
                else:
                    # tail cluster: chain head's cluster prob with tail softmax
                    __lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[i] )
                    __lowerCAmelCase = tf.nn.log_softmax(__a )
                    __lowerCAmelCase = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    __lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(__a )
                    if target is not None:
                        __lowerCAmelCase = tf.boolean_mask(__a , __a )
                        __lowerCAmelCase = tf.boolean_mask(__a , __a )
                        __lowerCAmelCase = self._gather_logprob(__a , __a )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # scatter this cluster's NLL contributions back into place
                    loss += tf.scatter_nd(__a , -cur_logprob , shape_list(__a ) )
            __lowerCAmelCase = tf.concat(__a , axis=-1 )
        if target is not None:
            if return_mean:
                __lowerCAmelCase = tf.reduce_mean(__a )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(__a )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(__a , name=self.name , aggregation="mean" if return_mean else "" )
        return out
| 282
| 0
|
from math import factorial, radians
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ = 18 , UpperCAmelCase_ = 10):
"""simple docstring"""
snake_case__ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
snake_case__ : Optional[int] = radians(UpperCAmelCase_)
snake_case__ : Optional[Any] = angle_in_radians
snake_case__ : Optional[int] = 3
snake_case__ : List[str] = -1
for _ in range(UpperCAmelCase_):
result += (b * (angle_in_radians**a)) / factorial(UpperCAmelCase_)
snake_case__ : List[str] = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(UpperCAmelCase_ , UpperCAmelCase_)
if __name__ == "__main__":
__import__('doctest').testmod()
| 648
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowercase__ :
"""simple docstring"""
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=50,
    initializer_range=0.02,
    use_labels=True,
    scope=None,
):
    """Store the BertGeneration tester's hyper-parameters on the instance.

    Note: the original signature repeated the mangled name ``__a`` for every
    parameter (a SyntaxError) and each assignment targeted a local instead of
    an attribute; parameter names and ``self.*`` targets are restored from the
    defaults' order and the attributes read by the sibling methods.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.use_labels = use_labels
    self.scope = scope
def lowercase ( self : Optional[Any] ):
    """Build random model inputs: token ids, optional attention mask, optional
    labels, plus the config.

    Returns:
        (config, input_ids, input_mask, token_labels) — mask/labels are None
        when the corresponding ``use_*`` flag is off.

    Note: the original assigned every local to one mangled name (NameError on
    the return); locals restored from the read sites. ``token_labels`` is also
    initialized to None so the return works when ``use_labels`` is False.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    token_labels = None
    if self.use_labels:
        token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    config = self.get_config()
    return config, input_ids, input_mask, token_labels
def lowercase ( self : Optional[Any] ):
    """Build a BertGenerationConfig from the tester's hyper-parameters
    (encoder flavor, so ``is_decoder=False``).

    Note: the original passed ``is_decoder=__a`` with ``__a`` undefined in
    this scope (NameError); False matches the encoder tester's role — the
    decoder variant flips the flag separately.
    """
    return BertGenerationConfig(
        vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
def prepare_config_and_inputs_for_decoder(self):
    """Extend :meth:`prepare_config_and_inputs` with cross-attention inputs.

    The original collapsed the four-way unpack into a single repeated name,
    leaving ``config``/``input_ids``/... undefined at the return statement.
    """
    (
        config,
        input_ids,
        input_mask,
        token_labels,
    ) = self.prepare_config_and_inputs()

    config.is_decoder = True
    encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
    encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    return (
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    )
def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
    """Run the encoder with and without a mask and check the output shape."""
    model = BertGenerationEncoder(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
    self,
    config,
    input_ids,
    input_mask,
    token_labels,
    encoder_hidden_states,
    encoder_attention_mask,
    **kwargs,
):
    """Run the encoder as a cross-attending decoder (with and without the
    encoder attention mask) and check the output shape."""
    config.add_cross_attention = True
    model = BertGenerationEncoder(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
    )
    result = model(
        input_ids,
        attention_mask=input_mask,
        encoder_hidden_states=encoder_hidden_states,
    )
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_decoder_model_past_large_inputs(
    self,
    config,
    input_ids,
    input_mask,
    token_labels,
    encoder_hidden_states,
    encoder_attention_mask,
    **kwargs,
):
    """Check that decoding with a KV cache matches decoding without one."""
    config.is_decoder = True
    config.add_cross_attention = True
    model = BertGenerationDecoder(config=config).to(torch_device).eval()

    # first forward pass
    outputs = model(
        input_ids,
        attention_mask=input_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        use_cache=True,
    )
    past_key_values = outputs.past_key_values

    # create hypothetical multiple next tokens and extend input_ids with them
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

    # append to next input_ids and attention mask
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

    output_from_no_past = model(
        next_input_ids,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        output_hidden_states=True,
    )["hidden_states"][0]
    output_from_past = model(
        next_tokens,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        past_key_values=past_key_values,
        output_hidden_states=True,
    )["hidden_states"][0]

    # select a random hidden-dim slice to compare
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
    """Run the decoder LM head with labels and check the logits shape."""
    model = BertGenerationDecoder(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
    """Adapt :meth:`prepare_config_and_inputs` to the dict form ModelTesterMixin expects."""
    config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-tester driven unit tests for the BertGeneration encoder/decoder.

    Attribute and method names restored so the mixins and unittest discovery
    can find them (`all_model_classes`, `test_*`, ...).  NOTE(review): the
    original listed a third mixin base; GenerationTesterMixin is not imported
    at the top of this file, so only the two imported mixins are used here.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        # Exercise the plain-BERT code path by overriding the model type.
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        # Exercise the path where the caller supplies no attention mask.
        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration test: encoder hidden states against reference values.

    The class is renamed (all three test classes shared one obfuscated name,
    so the later definitions shadowed this one) and the method is renamed to
    ``test_*`` so unittest actually discovers it.
    """

    @slow
    def test_inference_no_head(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration test: decoder LM logits against reference values.

    Renamed from the shared obfuscated class name and given a ``test_*``
    method name so unittest discovers it.
    """

    @slow
    def test_inference_no_head(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 648
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the TAPAS sub-package: maps each submodule to the
# public names it defines.  Objects are imported only on first attribute access.
a_ = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the PyTorch objects under their own submodule key instead of
    # rebinding the whole structure (the original overwrote the dict, losing
    # the configuration/tokenization entries).
    a_["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in sys.modules so attribute access triggers the
    # deferred imports (the original discarded the _LazyModule instance).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 715
|
import numpy as np
class Cell:
    """A node in the grid: a position, a parent link and A*-style costs.

    Renamed from the obfuscated class name — both grid classes shared one
    name, so this one was shadowed while ``Gridworld.get_neigbours`` and the
    demo block still instantiate ``Cell()``.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        # Search bookkeeping: g = cost from start, h = heuristic, f = g + h.
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells denote the same node when they occupy the same position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid world whose cell weights live in a NumPy array.

    Renamed from the obfuscated class name so the demo block's
    ``Gridworld()`` call resolves.
    """

    def __init__(self, world_size=(5, 5)):
        # w holds a weight per cell; the demo marks the found path in it.
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbour cells of ``cell``.

        Each neighbour is a fresh ``Cell`` whose ``parent`` is ``cell``.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for offset in neughbour_cord:
            x = current_x + offset[0]
            y = current_y + offset[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour)
        return neighbours
def lowerCamelCase__(world, start, goal):
    """A*-style best-first search from ``start`` to ``goal`` on ``world``.

    Returns the path as a list of positions from start to goal.  The original
    was a SyntaxError (three parameters all named ``_a``) and, because the
    obfuscation collapsed all attribute writes into one local, never set
    ``n.g``/``n.h``/``n.f`` and computed a heuristic of ``(y-y)**2 + (x-x)**2``
    (always zero).  Membership checks against the open/closed lists were also
    no-ops (``continue`` only affected the inner scan loop).
    """
    _open = []
    _closed = []
    _open.append(start)

    current = start
    while _open:
        # Expand the open node with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip nodes we have already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared-distance heuristic to the goal.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip if an equal-or-better copy is already queued.
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)

    # Walk parent links back from the goal and reverse into start->goal order.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
a_ = Gridworld()
# Start position and goal
a_ = Cell()
a_ = (0, 0)
a_ = Cell()
a_ = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
a_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
a_ = 1
print(world.w)
| 193
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# The obfuscation collapsed six distinct module constants into one name,
# shadowing each other; the doc-sample decorators further down reference the
# original names, which are restored here.

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Zero-pad -> conv -> batch-norm -> activation building block.

    Renamed from the shared obfuscated class name so the other RegNet layers,
    which instantiate ``TFRegNetConvLayer``, resolve.  ``ConvaD``-style names
    were invalid tf.keras attributes and are restored (Conv2D, ZeroPadding2D).
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        # Keras invokes `call` (the obfuscated method name was never invoked).
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count and applies the first conv."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch-norm used to project/downsample the residual branch."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation block: global-pool, bottleneck MLP, channel gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Scale each channel of the input by its learned attention weight.
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet residual layer: 1x1 -> grouped 3x3 -> 1x1 bottleneck."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Final projection has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet residual layer with a squeeze-and-excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # Final projection has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A stage: ``depth`` layers of the same type, downsampling in the first."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages, optionally collecting per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")
            )

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        # Collect the input of every stage plus the final output when requested.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; converts outputs back to NCHW format."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well (the original
        # transposed the wrong variable inside this comprehension).
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE(review): the obfuscated property name was unrecoverable; the
        # TensorSpec dict matches TFPreTrainedModel's `input_signature`
        # contract — confirm against the transformers version in use.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# The originals were raw strings containing literal "\n" escape sequences
# (so the docs rendered as one line with backslash-n text); rebuilt as real
# multi-line raw strings under the names the decorators below reference.
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Thin wrapper exposing TFRegNetMainLayer as a standalone model."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone plus a flatten + dense classification head."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head: flatten the pooled NCHW features, then project.
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 159
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize a PIL image down to a multiple of 32 and return a [-1, 1] NCHW tensor.

    Renamed to ``preprocess`` — the pipeline below calls it by that name.  The
    original unpacked ``image.size`` into one repeated variable and then read
    undefined names ``w``/``h`` (NameError); ``np.floataa`` was an invalid
    attribute for ``np.float32``.
    """
    w, h = image.size
    # Resize to an integer multiple of 32 so downstream strides divide evenly.
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    # HWC -> NCHW with a leading batch dimension of 1.
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    # Map [0, 1] to the [-1, 1] range the VAE expects.
    return 2.0 * image - 1.0
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(
    self,
    vqvae: VQModel,
    unet: UNetaDModel,
    scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ],
):
    """Register the VQ-VAE, U-Net and scheduler sub-modules on the pipeline.

    The original signature named all three parameters identically (a
    SyntaxError); they are restored to the names used in register_modules.
    """
    super().__init__()
    self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : Tuple , _snake_case : Union[torch.Tensor, PIL.Image.Image] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[int] = 100 , _snake_case : Optional[float] = 0.0 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(_snake_case , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = 1
elif isinstance(_snake_case , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_snake_case )}""" )
if isinstance(_snake_case , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = preprocess(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels // 2, height, width)
SCREAMING_SNAKE_CASE__ = next(self.unet.parameters() ).dtype
SCREAMING_SNAKE_CASE__ = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case )
SCREAMING_SNAKE_CASE__ = image.to(device=self.device , dtype=_snake_case )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_snake_case , device=self.device )
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ = eta
for t in self.progress_bar(_snake_case ):
# concat latents and low resolution image in the channel dimension.
SCREAMING_SNAKE_CASE__ = torch.cat([latents, image] , dim=1 )
SCREAMING_SNAKE_CASE__ = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(_snake_case , _snake_case ).sample
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
# decode the image latents with the VQVAE
SCREAMING_SNAKE_CASE__ = self.vqvae.decode(_snake_case ).sample
SCREAMING_SNAKE_CASE__ = torch.clamp(_snake_case , -1.0 , 1.0 )
SCREAMING_SNAKE_CASE__ = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 159
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for the MaskFormer-Swin backbone.

    NOTE(review): the original declared the bases as ``(_a, _a)`` (duplicate,
    and undefined) and every ``__init__`` parameter as ``__lowerCAmelCase``
    (duplicate argument names — a SyntaxError).  Bases are restored from the
    imports at the top of this file; parameter names from the attributes the
    body sets.
    """

    # Identifier used by AutoConfig; required by PretrainedConfig subclasses.
    model_type = "maskformer-swin"

    # Map generic config attribute names onto Swin's own naming.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 548
|
import heapq
import sys
import numpy as np
UpperCamelCase__ = tuple[int, int]
class __SCREAMING_SNAKE_CASE :
def __init__( self ):
UpperCamelCase__ = []
UpperCamelCase__ = set()
def _lowerCamelCase ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _lowerCamelCase ( self ):
return len(self.elements ) == 0
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__lowerCAmelCase )
else:
# update
# print("update", item)
UpperCamelCase__ = []
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCamelCase ( self , __lowerCAmelCase ):
if item in self.set:
self.set.remove(__lowerCAmelCase )
UpperCamelCase__ = []
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCamelCase ( self ):
return self.elements[0][1]
def _lowerCamelCase ( self ):
((UpperCamelCase__) , (UpperCamelCase__)) = heapq.heappop(self.elements )
self.set.remove(__lowerCAmelCase )
return (priority, item)
def _UpperCamelCase (a__ :TPos , a__ :TPos ):
"""simple docstring"""
UpperCamelCase__ = np.array(a__ )
UpperCamelCase__ = np.array(a__ )
return np.linalg.norm(a - b )
def _UpperCamelCase(p1, p2):
    """Inadmissible heuristic #1: consistent heuristic floor-divided by global ``t``.

    (Fixes the original's duplicate ``a__`` parameter names — a SyntaxError.)
    NOTE(review): ``consistent_heuristic`` and ``t`` are module-level names
    whose definitions were mangled elsewhere in this file — confirm they exist.
    """
    return consistent_heuristic(p1, p2) // t
def _UpperCamelCase (a__ :TPos , a__ :TPos ):
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _UpperCamelCase(start, i, goal, g_function):
    """Search key for queue *i*: ``g(start) + Wa * h_i(start, goal)``.

    (Fixes the original's four identical ``a__`` parameter names — a
    SyntaxError; the body already referenced ``start``, ``i``, ``goal`` and
    ``g_function``.)  ``Wa`` and ``heuristics`` are module-level globals.
    """
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def _UpperCamelCase (a__ :Optional[Any] , a__ :Union[str, Any] , a__ :int ):
    """Print the grid, obstacles and the path found, then terminate the process.

    NOTE(review): this block was mangled by an automated rename — all three
    parameters are named ``a__`` (duplicate argument names are a SyntaxError)
    and the body references names the signature never binds (``grid``, ``x``,
    ``back_pointer``, ``goal``, ``start``).  The intended parameters are
    presumably ``(back_pointer, goal, start)``; restore before use.
    """
    # Build an n x n character grid, initially all "*" (free cells).
    UpperCamelCase__ = np.chararray((n, n) )
    for i in range(a__ ):
        for j in range(a__ ):
            UpperCamelCase__ = """*"""
    for i in range(a__ ):
        for j in range(a__ ):
            # Mark blocked cells; row i maps to y = (n - 1) - i (y axis flipped).
            if (j, (n - 1) - i) in blocks:
                UpperCamelCase__ = """#"""
    UpperCamelCase__ = """-"""
    # Walk the back-pointer chain from goal to start, marking the path with "-".
    UpperCamelCase__ = back_pointer[goal]
    while x != start:
        ((UpperCamelCase__) , (UpperCamelCase__)) = x
        # print(x)
        UpperCamelCase__ = """-"""
        UpperCamelCase__ = back_pointer[x]
    UpperCamelCase__ = """-"""
    # Render the grid row by row, labelling the end position.
    for i in range(a__ ):
        for j in range(a__ ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    UpperCamelCase__ = back_pointer[goal]
    while x != start:
        print(a__ , end=""" """ )
        UpperCamelCase__ = back_pointer[x]
    print(a__ )
    # Hard-exits once a path has been reported.
    sys.exit()
def _UpperCamelCase(p):
    """Return True iff grid position ``p = (x, y)`` lies inside the n x n board.

    (Fixes the original, whose parameter was named ``a__`` while the body
    referenced the undefined name ``p``.)  ``n`` is a module-level global.
    """
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def _UpperCamelCase(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand grid state *s*: relax its four neighbours and queue them.

    (Fixes the original, which declared all eight parameters under the single
    name ``a__`` — duplicate argument names are a SyntaxError; the body already
    referenced the intended names.)  Uses the module globals ``n_heuristic``,
    ``blocks``, ``goal`` and ``Wa`` and the module functions ``valid`` / ``key``.
    """
    # s is being expanded: drop it from every open list.
    # NOTE(review): the mangled source had ``range(a__)`` here; the loop over
    # all open lists implies ``n_heuristic`` — confirm.
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            # First visit: initialise bookkeeping for this cell.
            if valid(neighbours) and neighbours not in visited:
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")
            # Relaxation: every move costs 1.
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def _UpperCamelCase ():
"""simple docstring"""
UpperCamelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
# Heuristic table: index 0 must be the consistent (anchor) heuristic.
# NOTE(review): ``consistent_heuristic`` / ``heuristic_a`` and
# ``make_common_ground`` are the names this file's call sites use, but the
# corresponding function definitions were mangled to ``_UpperCamelCase`` —
# restore those names for the module to import cleanly.
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

# A horizontal wall of blocked cells at y = 1 across the whole board.
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]

# Alternative obstacle layout (built but unused by default).
blocks_all = make_common_ground()

blocks = blocks_blk

# hyper parameters
# NOTE(review): the mangled source assigned 1 twice (originally W1 and W2);
# the rest of the file only references ``Wa``, so a single constant suffices.
Wa = 1
n = 20  # board is n x n
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # global expansion counter, incremented by the search loop
def _UpperCamelCase (a__ :TPos , a__ :TPos , a__ :int ):
    """Shared Multi-Heuristic A* (SMHA*) search from ``start`` to ``goal``.

    Maintains one priority queue per heuristic (queue 0 is the anchor,
    driven by the consistent heuristic) and alternates expansions between
    the anchor and the inadmissible queues while the latter stay within a
    factor ``Wa`` of the anchor's best key.

    NOTE(review): this block was mangled by an automated rename — all three
    parameters are named ``a__`` (duplicate argument names are a SyntaxError)
    and the body references ``start``, ``goal`` and names passed as ``a__``;
    the intended signature is presumably ``(start, goal, n_heuristic)``.
    The ``__main__`` guard below likewise calls ``multi_a_star``, which is
    this function's pre-mangling name.  Restore before use.
    """
    UpperCamelCase__ = {start: 0, goal: float("""inf""" )}
    UpperCamelCase__ = {start: -1, goal: -1}
    UpperCamelCase__ = []
    UpperCamelCase__ = set()
    # One priority queue per heuristic, each seeded with the start state.
    for i in range(a__ ):
        open_list.append(PriorityQueue() )
        open_list[i].put(a__ , key(a__ , a__ , a__ , a__ ) )
    UpperCamelCase__ = []
    UpperCamelCase__ = []
    while open_list[0].minkey() < float("""inf""" ):
        for i in range(1 , a__ ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            # Expand from inadmissible queue i while its best key is within
            # a factor Wa of the anchor queue's best key.
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(a__ , a__ , a__ )
                else:
                    UpperCamelCase__ , UpperCamelCase__ = open_list[i].top_show()
                    visited.add(a__ )
                    expand_state(
                        a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
                    close_list_inad.append(a__ )
            else:
                # Otherwise expand from the anchor queue.
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(a__ , a__ , a__ )
                else:
                    UpperCamelCase__ = open_list[0].top_show()
                    visited.add(a__ )
                    expand_state(
                        a__ , 0 , a__ , a__ , a__ , a__ , a__ , a__ , )
                    close_list_anchor.append(a__ )
    # Anchor queue exhausted without reaching the goal: render the board.
    print("""No path found to goal""" )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(a__ ):
            if (j, i) in blocks:
                print("""#""" , end=""" """ )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("""*""" , end=""" """ )
                else:
                    print("""-""" , end=""" """ )
            else:
                print("""*""" , end=""" """ )
            if (j, i) == (n - 1, n - 1):
                print("""<-- End position""" , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 548
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( DiffusionPipeline ):
    """Speech-to-image pipeline: Whisper transcribes audio to a text prompt,
    which then drives a Stable-Diffusion text-to-image generation.

    NOTE(review): in the original, the base class ``__magic_name__`` was
    undefined (``DiffusionPipeline`` is imported at the top of this file),
    every ``__init__``/``__call__`` parameter shared one mangled name
    (a SyntaxError), and the two attention-slicing methods collided under a
    single mangled name; names below are restored from the identifiers the
    method bodies reference.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        """Compute attention in slices to reduce peak memory usage."""
        if slice_size == "auto":
            # Halve the attention-head dimension as a reasonable default slice.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Transcribe *audio* with Whisper, then generate images for the
        transcription.  Returns a ``StableDiffusionPipelineOutput`` (or the
        bare image array when ``return_dict=False``).
        """
        # Transcribe the audio into a text prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # eta (η) is only used with the DDIMScheduler (https://arxiv.org/abs/2010.02502,
        # should be in [0, 1]); other schedulers ignore it.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 184
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionKDiffusionPipeline``.

    NOTE(review): in the original, all four methods shared the mangled name
    ``__UpperCamelCase`` (so only the last survived) and the bodies referenced
    the undefined name ``lowercase__`` where a device / flag was intended.
    ``tearDown`` is restored from the ``super().tearDown()`` call; the other
    method names are reconstructed ``test_*`` names so unittest can discover
    them, and ``lowercase__`` is replaced by ``torch_device`` (imported above),
    ``disable=None`` and ``use_karras_sigmas=True`` per the call contexts.
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # Looser tolerance than the v1 test in the original — kept as-is.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 184
| 1
|
def lowerCamelCase_(numbers) -> int:
    """Return the maximum product over all contiguous subarrays of *numbers*.

    Returns 0 for an empty input.  Raises ValueError when *numbers* is not a
    list/tuple of integers.

    (Fixes the original, whose parameter was named ``_lowercase`` while the
    body referenced the undefined names ``numbers``, ``max_till_now``,
    ``min_till_now`` and ``max_prod``.)
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # A negative factor swaps the roles of the running max and min.
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 387
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( DiffusionPipeline ):
    """Speech-to-image pipeline: Whisper transcribes audio into a text prompt
    that then drives Stable-Diffusion text-to-image generation.

    NOTE(review): in the original, the base class ``lowerCAmelCase__`` was
    undefined (``DiffusionPipeline`` is imported at the top of this file),
    every ``__init__``/``__call__`` parameter was declared under the single
    name ``__UpperCAmelCase`` (duplicate argument names — a SyntaxError), and
    both attention-slicing methods shared one mangled name; the identifiers
    below are restored from the names the method bodies reference.
    """

    def __init__(
        self,
        speech_model,
        speech_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        """Compute attention in slices to reduce peak memory usage."""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Transcribe *audio* with Whisper, then generate images for the
        transcription.  Returns a ``StableDiffusionPipelineOutput`` (or the
        bare image array when ``return_dict=False``).
        """
        # Transcribe the audio into a text prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # eta (η) is only used with the DDIMScheduler (https://arxiv.org/abs/2010.02502,
        # should be in [0, 1]); other schedulers ignore it.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 387
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class snake_case__ :
    """A univariate polynomial.

    ``coefficients[i]`` holds the coefficient of ``x**i``; the sequence must
    have exactly ``degree + 1`` entries.  Instances support ``+``, binary and
    unary ``-``, ``*``, ``==``/``!=``, evaluation, differentiation and
    integration.

    Fixes the obfuscated original: ``__init__`` had two parameters both named
    ``__a`` (a SyntaxError), method bodies referenced the undefined names
    ``lowerCAmelCase__``/``polynomial_a``/``Polynomial``, and the integral
    dropped its ``coefficients[i + 1] = ...`` assignment target.
    """

    def __init__(self, degree: int, coefficients: "MutableSequence[float]") -> None:
        """Store ``degree`` and a defensive copy of ``coefficients``.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        # Copy so that mutating the caller's sequence cannot change us.
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: "snake_case__") -> "snake_case__":
        """Return a new polynomial equal to ``self + polynomial_2``."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return snake_case__(self.degree, coefficients)
        coefficients = polynomial_2.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return snake_case__(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: "snake_case__") -> "snake_case__":
        """Return ``self - polynomial_2`` (implemented as ``self + (-1)*other``)."""
        return self + polynomial_2 * snake_case__(0, [-1])

    def __neg__(self) -> "snake_case__":
        """Return the additive inverse of this polynomial."""
        return snake_case__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: "snake_case__") -> "snake_case__":
        """Return the product polynomial (degree is the sum of the degrees)."""
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return snake_case__(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: "int | float") -> "int | float":
        """Evaluate the polynomial at ``substitution`` (direct power sum)."""
        result: "int | float" = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution ** i)
        return result

    def __str__(self) -> str:
        """Render e.g. ``3x^2 + 2x + 1``; zero coefficients are skipped."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                # Negative coefficient: emit the sign, the magnitude follows.
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> "snake_case__":
        """Return the first derivative (degree drops by one)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return snake_case__(self.degree - 1, coefficients)

    def integral(self, constant: "int | float" = 0) -> "snake_case__":
        """Return an antiderivative with integration constant ``constant``."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return snake_case__(self.degree + 1, coefficients)

    # Backward-compatible alias: in the obfuscated original, three methods
    # shared the name ``A_`` and the surviving (last) binding was the integral.
    A_ = integral

    def __eq__(self, polynomial_2: object) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_2, snake_case__):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
| 286
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase : str = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Training-consistency test: noising images and training one step should
    proceed identically under DDPM and DDIM schedulers sharing a beta schedule.

    NOTE(review): name obfuscation broke the local bindings in this class —
    every assignment rebinds ``snake_case__`` and several reads use the unbound
    placeholder ``lowerCAmelCase__`` — so the test raises NameError as written.
    """
    def UpperCAmelCase_ ( self : Any , lowerCAmelCase__ : str=32 ) -> Optional[Any]:
        """Build a small seeded UNet2D model plus an SGD optimizer."""
        set_seed(0 )
        snake_case__ = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3 )
        snake_case__ = torch.optim.SGD(model.parameters() , lr=0.0_001 )
        # NOTE(review): both results above were bound to the same obfuscated
        # name, so ``model`` / ``optimizer`` returned here are undefined.
        return model, optimizer
    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        """Train 4 steps under DDPM noise, then under DDIM noise, and compare."""
        snake_case__ = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        snake_case__ = DDPMScheduler(
            num_train_timesteps=1000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
        snake_case__ = DDIMScheduler(
            num_train_timesteps=1000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        snake_case__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCAmelCase__ ) for _ in range(4 )]
        snake_case__ = [torch.randn((4, 3, 32, 32) ).to(lowerCAmelCase__ ) for _ in range(4 )]
        snake_case__ = [torch.randint(0 , 1000 , (4,) ).long().to(lowerCAmelCase__ ) for _ in range(4 )]
        # train with a DDPM scheduler
        # NOTE(review): ``self.get_model_optimizer`` does not exist — the
        # helper above is (obfuscated to) ``UpperCAmelCase_``.
        snake_case__ , snake_case__ = self.get_model_optimizer(resolution=32 )
        model.train().to(lowerCAmelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            snake_case__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            snake_case__ = model(lowerCAmelCase__ , timesteps[i] ).sample
            snake_case__ = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        snake_case__ , snake_case__ = self.get_model_optimizer(resolution=32 )
        model.train().to(lowerCAmelCase__ )
        for i in range(4 ):
            optimizer.zero_grad()
            snake_case__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            snake_case__ = model(lowerCAmelCase__ , timesteps[i] ).sample
            snake_case__ = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # NOTE(review): upstream compared the DDPM vs DDIM noisy images and
        # noise predictions here; both argument names are unbound as written.
        self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
        self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
| 214
| 0
|
from __future__ import annotations
class snake_case_ :
    """A node of a singly linked list.

    Attributes:
        data: payload stored in the node.
        next: the following node, or ``None`` at the end of the list.

    Fixes the obfuscated original, which dropped the ``self.`` receivers in
    ``__init__`` (so nothing was stored) and joined the undefined name ``A_``
    in ``__repr__``.
    """

    def __init__(self, A_=None) -> None:
        # ``A_`` keeps the original positional parameter name for
        # backward compatibility with existing callers.
        self.data = A_
        self.next = None

    def __repr__(self) -> str:
        """Render the chain starting at this node as e.g. ``1->2->3``."""
        parts = []
        node = self
        while node:
            parts.append(f"{node.data}")
            node = node.next
        return "->".join(parts)
def _UpperCAmelCase ( A ):
    """Build a singly linked list from the sequence ``A`` and return its head.

    Fixes the obfuscated original, which constructed the undefined name
    ``Node`` (the node class in this file is ``snake_case_``) and dropped the
    ``head``/``current`` bindings and the ``current.next`` link assignment.

    Raises:
        Exception: if ``A`` is empty.
    """
    if not A:
        raise Exception("The Elements List is empty" )
    head = current = snake_case_(A[0])
    for i in range(1 , len(A ) ):
        current.next = snake_case_(A[i])
        current = current.next
    return head
def _UpperCAmelCase ( A ):
'''simple docstring'''
if head_node is not None and isinstance(A , A ):
print_reverse(head_node.next )
print(head_node.data )
def _UpperCAmelCase ( ):
    """Demo driver: run the module doctests, then build a five-element list and
    print it forwards and in reverse.

    NOTE(review): ``make_linked_list``, ``print_reverse`` and ``A`` are not
    defined under those names in this file (the helpers above are all
    obfuscated to ``_UpperCAmelCase``), so this raises NameError as written —
    restore the intended helper names before use.
    """
    from doctest import testmod
    testmod()
    UpperCAmelCase__ =make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(A )
    print("Elements in Reverse:" )
    print_reverse(A )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined; the entry point defined above is
    # named ``_UpperCAmelCase``.
    main()
| 510
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
UpperCamelCase_ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json map.
# NOTE(review): this rebinding clobbers the logger above — both were given the
# same obfuscated name; restore distinct names (``logger`` and the pretrained
# config archive map) before relying on either.
UpperCamelCase_ = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class snake_case_ ( PretrainedConfig ):
    """Configuration class for GPTBigCode models (e.g. ``gpt_bigcode-santacoder``).

    Restored from the obfuscated original, in which every ``__init__``
    parameter was named ``A_`` (duplicate arguments are a SyntaxError), the
    ``self.`` receivers were dropped, and the base class was the undefined
    name ``a`` (``PretrainedConfig`` is imported above).

    Args:
        vocab_size: size of the token vocabulary.
        n_positions: maximum sequence length.
        n_embd: hidden size of the embeddings and transformer.
        n_layer: number of transformer layers.
        n_head: number of attention heads.
        n_inner: inner (MLP) dimension; ``None`` is treated downstream as a
            multiple of ``n_embd``.
        activation_function: activation used in the MLP.
        resid_pdrop / embd_pdrop / attn_pdrop: dropout probabilities.
        layer_norm_epsilon: epsilon used by the layer norms.
        initializer_range: stddev for weight initialization.
        scale_attn_weights: whether to scale attention weights.
        use_cache: whether to return past key/values for fast decoding.
        attention_softmax_in_fpaa / scale_attention_softmax_in_fpaa: float32
            attention-softmax toggles.  NOTE(review): ``fpaa`` is this corpus's
            mangling of ``fp32``; kept for in-repo consistency.
        multi_query: use multi-query attention (a single KV head).
    """

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=5_0257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Let the base class record special tokens and remaining kwargs.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 510
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
A: Optional[Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json map.
# NOTE(review): this rebinding clobbers the logger above — both were
# obfuscated to the same name ``A``; restore distinct names (``logger`` and
# the pretrained-config archive map) before relying on either.
A: str = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for M-CTC-T speech-recognition models.

    Restored from the obfuscated original, in which every ``__init__``
    parameter shared the name ``_SCREAMING_SNAKE_CASE`` (duplicate arguments
    are a SyntaxError), the ``self.`` receivers were dropped, and the base
    class was the undefined name ``UpperCAmelCase__`` (``PretrainedConfig``
    is imported above).

    Raises:
        ValueError: if ``len(conv_kernel) != num_conv_layers``.
    """

    model_type = 'mctct'

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1E-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 160
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs that have a preprocessed copy on the Hugging Face
# GCP bucket; consumed by the parameter-builder helper below.
A: Optional[int] = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def _snake_case ( UpperCamelCase : List[str]=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
# NOTE(review): the decorator references ``list_datasets_on_hf_gcp_parameters``
# and ``UpperCAmelCase__``, neither of which is defined in this file (the
# parameter builder above is obfuscated to ``_snake_case``), so the module
# fails at import time as written.
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=UpperCAmelCase__ ) )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
    """Parameterized check that each HF-GCP dataset's dataset_info.json is
    downloadable from the bucket.

    NOTE(review): the two class attributes below both rebind the same mangled
    name (upstream: ``dataset = None`` / ``config_name = None``), and the test
    method's two parameters share one name — a SyntaxError.
    """
    __lowerCAmelCase : Optional[int] = None
    __lowerCAmelCase : Tuple = None
    def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Resolve the builder for (dataset, config) and fetch its info file
        from the HF GCP base URL into a temporary cache."""
        with TemporaryDirectory() as tmp_dir:
            UpperCAmelCase : str = dataset_module_factory(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path , dataset=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase : DatasetBuilder = builder_cls(
                cache_dir=_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE , hash=dataset_module.hash , )
            UpperCAmelCase : Optional[int] = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=_SCREAMING_SNAKE_CASE ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            UpperCAmelCase : Tuple = cached_path(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
        self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) )
@pytest.mark.integration
def _snake_case ( UpperCamelCase : int ):
    """Integration test: download-and-prepare simple-English wikipedia from the
    HF GCP mirror and materialize it as a Dataset.

    NOTE(review): the body reads ``tmp_path_factory`` but the (obfuscated)
    parameter is named ``UpperCamelCase`` — the pytest fixture name was lost.
    Every assignment also rebinds ``UpperCAmelCase``, so ``dataset_module``,
    ``builder_cls``, ``builder_instance`` and ``ds`` are unbound as written.
    """
    UpperCAmelCase : str = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    UpperCAmelCase : Optional[int] = dataset_module_factory("""wikipedia""" , cache_dir=UpperCamelCase )
    UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path )
    UpperCAmelCase : DatasetBuilder = builder_cls(
        cache_dir=UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    UpperCAmelCase : List[str] = None
    builder_instance.download_and_prepare()
    UpperCAmelCase : List[Any] = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def _snake_case ( UpperCamelCase : str ):
    """Integration test: stream the wikipedia dataset via as_streaming_dataset
    and check the first train example is reachable.

    NOTE(review): every assignment rebinds ``UpperCAmelCase`` so
    ``dataset_module``, ``builder_cls``, ``builder_instance`` and ``ds`` are
    unbound; the final ``isinstance(UpperCamelCase, UpperCamelCase)`` calls
    were presumably ``isinstance(ds, IterableDatasetDict)`` /
    ``isinstance(ds["train"], IterableDataset)`` upstream — confirm.
    """
    UpperCAmelCase : List[str] = dataset_module_factory("""wikipedia""" , cache_dir=UpperCamelCase )
    UpperCAmelCase : Optional[Any] = import_main_class(dataset_module.module_path , dataset=UpperCamelCase )
    UpperCAmelCase : DatasetBuilder = builder_cls(
        cache_dir=UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    UpperCAmelCase : Any = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(UpperCamelCase , UpperCamelCase )
    assert "train" in ds
    assert isinstance(ds["""train"""] , UpperCamelCase )
    assert next(iter(ds["""train"""] ) )
| 160
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowercase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json map.
# NOTE(review): this rebinding clobbers the logger above — both names were
# obfuscated to ``lowercase__``; restore distinct names before relying on
# either.
lowercase__ = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for Swin Transformer V2 models.

    Restored from the obfuscated original, in which every ``__init__``
    parameter was named ``_lowercase`` (duplicate arguments are a
    SyntaxError), the ``self.`` receivers were dropped, and the base class
    was the undefined name ``UpperCamelCase__`` (``PretrainedConfig`` is
    imported above).
    """

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 710
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Fixture builder for the TF EfficientFormer tests: holds hyper-parameters
    and constructs configs and inputs for the model classes under test.

    NOTE(review): obfuscation damage — every ``__init__`` parameter is named
    ``_lowercase`` (duplicate arguments are a SyntaxError) and every
    assignment rebinds the throwaway ``__a`` instead of setting the
    ``self.batch_size`` etc. attributes that the methods below read.  The
    right-hand names (``parent``, ``batch_size``, ...) record the intended
    parameter-to-attribute mapping; restore them before use.
    """
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Record test hyper-parameters (see class NOTE about lost bindings)."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # NOTE(review): ``embed_dim + 1`` is presumably the expected sequence
        # length (``self.seq_length`` is read by create_and_check_model).
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Build (config, pixel_values, labels) for one test run."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Check the base model's last_hidden_state shape."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Check the classification head's logits shape (incl. greyscale input)."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for the common model tests."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-API test suite for the TF EfficientFormer model classes.

    NOTE(review): obfuscation damage — the mixin bases ``__snake_case`` are
    undefined (upstream: the model-tester and pipeline-tester mixins imported
    at the top of the file), the five boolean class flags below all rebind the
    same name ``_lowerCAmelCase``, and the placeholder ``_lowercase`` read
    throughout the method bodies is unbound.  As written the module fails at
    import time.
    """
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        """setUp-style hook: build the model tester and a ConfigTester.

        NOTE(review): ``TFEfficientFormerModelTester`` is not defined in this
        file under that name (the tester class above is obfuscated).
        """
        __a : Tuple = TFEfficientFormerModelTester(self )
        __a : Any = ConfigTester(
            self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
    def lowerCAmelCase__(self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        """Skipped: the model has no inputs_embeds path."""
        pass
    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        """Skipped: the model has no input/output embeddings."""
        pass
    def lowerCAmelCase__(self ):
        """Check every model's call signature starts with ``pixel_values``."""
        __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(_lowercase )
            __a : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[Any] = [*signature.parameters.keys()]
            __a : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )
    def lowerCAmelCase__(self ):
        """Check hidden-state outputs: expected count and last-stage shape."""
        def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
            # One forward pass per model class, with hidden states requested.
            __a : Tuple = model_class(_lowercase )
            __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __a : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowercase ) , _lowercase )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __a : Any = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __a : int = seq_length * self.model_tester.chunk_length
            else:
                __a : Any = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __a : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): ``asseretIsInstance`` is a typo for
                # ``assertIsInstance`` — this branch raises AttributeError.
                self.asseretIsInstance(_lowercase , (list, tuple) )
                self.assertEqual(len(_lowercase ) , _lowercase )
                __a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
                __a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : int = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
        """Drop labels for the teacher variant, which does not accept them."""
        __a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowerCAmelCase__(self ):
        """Smoke-test the base model forward pass."""
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )
    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def lowerCAmelCase__(self ):
        """Skipped: masked-image-modeling head not implemented."""
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
    def lowerCAmelCase__(self ):
        """Smoke-test the image-classification head."""
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )
    @slow
    def lowerCAmelCase__(self ):
        """Load the first pretrained checkpoint from the hub."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
    def lowerCAmelCase__(self ):
        """Check attention outputs: expected count and per-head shapes."""
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : int = True
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
        __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __a : List[Any] = True
            __a : Tuple = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[Any] = True
            __a : List[str] = model_class(_lowercase )
            __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def lowerCAmelCase__(self ):
        """Check the models build against fully dynamic (None) input shapes."""
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __a : Dict = model_class(_lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __a : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __a : Optional[Any] = model(_lowercase )
            self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the standard COCO fixture image used by the slow integration tests.

    Fixes the obfuscated original, which bound the opened image to one name
    but returned the undefined name ``image``.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests: run EfficientFormer-L1 on the COCO fixture
    image and verify the classification logits.

    Restored from the obfuscated original, in which the locals (``model``,
    ``image_processor``, ``inputs``, ``outputs``, expected values) were all
    collapsed onto rebound placeholder names and the image loader was
    referenced under a name that does not exist in this file (it is defined
    above as ``__magic_name__``).
    """

    @cached_property
    def default_image_processor(self):
        """Image processor matching the checkpoint, or None without vision deps."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Check the logits of the plain image-classification head."""
        model = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = __magic_name__()  # COCO fixture loader defined above
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        """Check the logits of the distillation (teacher) head variant."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = __magic_name__()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 63
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class __a ( TaskTemplate ):
    """Task template describing the column layout of an audio-classification
    dataset (an ``audio`` input column and a ``labels`` ClassLabel column).

    Restored from the obfuscated original, in which the decorator argument and
    base class were undefined placeholders, all five fields rebound one
    mangled name, and the ClassLabel type check tested against the ``features``
    argument itself.
    """

    # ``task`` identifies the template; kept in asdict output even at default.
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ClassLabel feature.

        Raises:
            ValueError: if ``label_column`` is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via the instance __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map this dataset's column names to the canonical 'audio'/'labels'."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 228
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[Any] =logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Normalize an absolute pixel box to the 0-1000 coordinate space used by LayoutLM.

    `box` is (left, top, right, bottom) in pixels; `width`/`height` are the image size.
    """
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on `image`, returning (words, normalized_boxes).

    Boxes are normalized to the 0-1000 LayoutLM coordinate space via `normalize_box`.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set for O(1) membership tests)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class __a ( A__ ):
    """Image processor for LayoutLM-style models (``A__`` is ``BaseImageProcessor``).

    Resizes images, optionally runs Tesseract OCR to extract words and normalized
    bounding boxes, and flips channels from RGB to BGR as required by
    Detectron2-based checkpoints.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 2_24, "width": 2_24}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
        output_size = (size["height"], size["width"])
        # Bare `resize` resolves to the module-level transform imported at the top of the file.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; returns a BatchFeature with pixel values
        (plus "words"/"boxes" when OCR is applied). Arguments default to the values
        configured in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 228
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def __snake_case ( UpperCAmelCase_ : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
lowerCamelCase_ = nums[0]
for i in range(1 , len(UpperCAmelCase_ ) ):
lowerCamelCase_ = nums[i]
lowerCamelCase_ = max(UpperCAmelCase_ , ans + num , UpperCAmelCase_ )
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("""Enter number of elements : """).strip())
    array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
| 445
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : str ):
return line.startswith(UpperCAmelCase_ ) or len(UpperCAmelCase_ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCAmelCase_ ) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Return the source code of `object_name` (dotted path) inside the diffusers repo.

    Raises:
        ValueError: if no module or no matching class/function is found.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f'{module}.py')):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')

    with open(os.path.join(DIFFUSERS_PATH, f'{module}.py'), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f' {object_name} does not match any function or class in {module}.')

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Matches "# Copied from diffusers.<path>" comments (groups: indent, object path, replace pattern).
_re_copy_warning = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
# Matches "old->new [option]" replacement clauses appended to a Copied from comment.
_re_replace_pattern = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
# Matches "<FILL ...>" placeholders.
_re_fill_pattern = re.compile(R"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    """Return the leading indentation of the first non-empty line of `code` ("" if none)."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Format `code` with black, wrapping indented (method-level) snippets in a dummy class."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    """Check that code marked "# Copied from" in `filename` matches the original.

    Returns a list of [object_name, start_index] for each mismatch; when
    `overwrite` is True the file is rewritten with the theoretical code.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f'^{indent}# End copy', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.')
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every Python file under DIFFUSERS_PATH; raise if any copy is inconsistent."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 445
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the M2M-100 model: names are only imported when accessed.
_import_structure = {
    '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
    '''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: simply skip registering the modeling objects.
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''M2M100ForConditionalGeneration''',
        '''M2M100Model''',
        '''M2M100PreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 91
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Citation, description and usage docstrings consumed by the Metric class below.
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''

_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''

_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects array-likes supporting `==` and `.mean()`)."""
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Accuracy plus F1 score, as used for the MRPC and QQP subsets."""
    acc = simple_accuracy(preds, labels)
    # `fa_score` is sklearn's F1 imported at the top of the file.
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation between predictions and labels (STS-B)."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_ (datasets.Metric):
    """GLUE metric: dispatches to accuracy / F1 / correlation depending on the subset."""

    def _info(self):
        # Validate the configuration name before building the metric info.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # STS-B is a regression task; every other subset uses integer labels.
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 335
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    """Config tester for Cvt: checks the Cvt-specific required attributes exist."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """embed_dim"""))
        self.parent.assertTrue(hasattr(config, """num_heads"""))
class TFCvtModelTester:
    """Builds tiny Cvt configs and random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage shrinks the spatial dims according to its patch conv parameters.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for TFCvt.

    Attention/embedding-related common tests are skipped: Cvt does not return
    attentions and has no token embeddings.
    """

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="""Cvt does not output attentions""")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="""Cvt does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""Cvt does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0,
        reason="""TF does not support backprop for grouped convolutions on CPU.""",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0,
        reason="""TF does not support backprop for grouped convolutions on CPU.""",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("""mixed_float16""")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        # Restore the default policy so other tests are unaffected.
        tf.keras.mixed_precision.set_global_policy("""float32""")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the first pretrained Cvt checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 74
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared diffusers CLI options."""
    # Imported lazily so pytest can collect this conftest without diffusers installed.
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the diffusers test reports when `--make-reports` was passed."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 74
| 1
|
"""simple docstring"""
def solution(n: int = 60_08_51_47_51_43) -> int:
    """Return the largest prime factor of `n` (Project Euler problem 3).

    Raises:
        TypeError: if `n` is not castable to int.
        ValueError: if `n` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n; since smaller factors were stripped,
        # this divisor is prime.
        while n % i != 0:
            i += 1
        ans = i
        # Strip out every power of that prime.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 19
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite `original_name` in a checkpoint `key` as `block.<num>.<layer>.<new_name>`,
    shifting the block number down by `offset`.
    """
    to_find = original_name.split('''.''')[0]
    key_list = key.split('''.''')
    # The block and layer numbers immediately precede `to_find` in the dotted key.
    anchor = key_list.index(to_find)
    orig_block_num = int(key_list[anchor - 2])
    layer_num = int(key_list[anchor - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''', f'''block.{new_block_num}.{layer_num}.{new_name}''')
    return key
def lowerCamelCase__ ( state_dict ):
    """Map timm PoolFormer state-dict keys to HF ``PoolFormerForImageClassification`` keys.

    Bug fixes vs. the obfuscated original: all locals were collapsed into one
    name, and the key-rewriting helper was called under a name never defined in
    this module — it is now a private nested helper so the function is
    self-contained.
    """

    def _replace_key_with_offset(key, offset, original_name, new_name):
        # shift the block index by `offset` and swap original_name -> new_name
        to_find = original_name.split('.' )[0]
        key_list = key.split('.' )
        orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
        layer_num = int(key_list[key_list.index(to_find ) - 1] )
        new_block_num = orig_block_num - offset
        return key.replace(
            f'{orig_block_num}.{layer_num}.{original_name}',
            f'block.{new_block_num}.{layer_num}.{new_name}',
        )

    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key = key.replace('network', 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.' )
            key = key.replace('proj', 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1' )
        if "mlp.fc2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2' )
        if "norm1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm' )
        if "norm2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm' )
        if "layer_scale_1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1' )
        if "layer_scale_2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2' )
        if "head" in key:
            key = key.replace('head', 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def lowerCamelCase__ ( ):
    """Download the standard COCO sanity-check image (two cats) as a PIL Image.

    Bug fixes: ``stream`` was being passed the URL string instead of ``True``
    (truthy but wrong), and the ``Optional[Any]`` return annotation referenced
    a name never imported in this module (NameError at definition time).
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True so `.raw` exposes the undecoded response body for PIL
    return Image.open(requests.get(url, stream=True ).raw )
@torch.no_grad()
def lowerCamelCase__ ( model_name, checkpoint_path, pytorch_dump_folder_path ):
    """Convert a timm PoolFormer checkpoint to HF ``PoolFormerForImageClassification``.

    Reconstructed from the obfuscated original, in which every local variable
    had been collapsed into a single name (all reads were NameErrors).

    Args:
        model_name: e.g. ``"poolformer_s12"`` — the last three characters select
            the architecture size.
        checkpoint_path: path to the original PyTorch ``.pth`` state dict.
        pytorch_dump_folder_path: output folder for the HF model + image processor.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes (ImageNet-1k label mapping)
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # per-size architecture hyper-parameters and crop percentage
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported' )

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )

    # Prepare image (NOTE(review): `prepare_img` is the downloader def above,
    # obfuscated upstream to `lowerCamelCase__`)
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt' ).pixel_values

    logger.info(f'Converting model {model_name}...' )

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt' ).pixel_values

    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f'Size {size} not supported' )

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2 )

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Bug fix: the obfuscated original assigned the parser/args to a throwaway
    # name and then read undefined names `parser`/`args`, and called the
    # undefined `convert_poolformer_checkpoint` (the converter def above is
    # bound to `lowerCamelCase__` at this point in the module).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""poolformer_s12""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    args = parser.parse_args()
    lowerCamelCase__(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 19
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """Configuration for an X-MOD model (obfuscated ``XmodConfig``).

    Bug fix: the obfuscated ``__init__`` declared every parameter with the
    same name (a SyntaxError). Parameter names are reconstructed from the
    default-value order and the attribute assignments in the body.
    """

    __a ="xmod"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD adapter-specific knobs
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # copy so a shared tuple default is never aliased/mutated
        self.languages = list(languages )
        self.default_language = default_language
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    @property
    def __UpperCamelCase ( self ) ->Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; both inputs share one axis map."""
        if self.task == "multiple-choice":
            axis_names = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            axis_names = {0: 'batch', 1: 'sequence'}
        return OrderedDict((input_name, axis_names) for input_name in ('input_ids', 'attention_mask') )
| 715
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    """Configuration for a ViViT video model (obfuscated ``VivitConfig``).

    Bug fix: the obfuscated ``__init__`` declared every parameter with the
    same name (a SyntaxError). Names are reconstructed from the default-value
    order and the attribute assignments in the body.
    """

    __a ="vivit"

    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        # NOTE(review): the mutable list default for tubelet_size mirrors the
        # original; the value is only stored, never mutated here.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 270
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__lowerCAmelCase : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = {}
with open(lowerCamelCase__ , """r""" ) as file:
for line_number, line in enumerate(lowerCamelCase__ ):
lowerCAmelCase__ = line.strip()
if line:
lowerCAmelCase__ = line.split()
lowerCAmelCase__ = line_number
lowerCAmelCase__ = words[0]
lowerCAmelCase__ = value
return result
def _UpperCAmelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy one fairseq tensor into the HF model at attribute path ``key``.

    Bug fix: the obfuscated original declared five identically-named
    parameters (a SyntaxError) and collapsed all locals into one name.
    Relies on module globals ``PARAM_MAPPING`` and ``logger``.
    """
    # walk the attribute path to the target module/parameter
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )

    # adapter parameters are addressed via PARAM_MAPPING instead of weight_type
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split(""".""" ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split(""".""" ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _UpperCAmelCase ( key , value , full_name , weight_type , hf_dict ):
    """Record one fairseq tensor under its HF key in ``hf_dict`` (dict-based
    variant of the recursive setter above).

    Bug fix: the obfuscated original declared five identically-named
    parameters (a SyntaxError). Relies on module global ``PARAM_MAPPING``.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""

    if weight_type is not None and weight_type != "param":
        full_key = """.""".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = """.""".join([key, hf_param_name] )
    else:
        full_key = key

    # NOTE(review): non-lm_head values are unwrapped via value[0] — presumably
    # dropping a leading singleton dimension; confirm against set_recursively.
    hf_dict[full_key] = value if """lm_head""" in full_key else value[0]
__lowerCAmelCase : Optional[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def _UpperCAmelCase ( name , value , hf_model=None , hf_dict=None ):
    """Try to route one fairseq weight into the HF wav2vec2 model (or into
    ``hf_dict`` when given); return whether the weight was consumed.

    Bug fixes: locals were collapsed into one name, and a duplicated
    unreachable ``return is_used`` followed the real one — the first return
    now sits inside the match branch as the control flow requires.
    Relies on module globals ``MAPPING`` and ``TOP_LEVEL_KEYS``.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
            is_used = True
            if "*" in mapped_key:
                # recover the encoder layer index from the fairseq name
                layer_index = name.split(key )[0].split(""".""" )[-2]
                mapped_key = mapped_key.replace("""*""" , layer_index )
            if "weight_g" in name:
                weight_type = """weight_g"""
            elif "weight_v" in name:
                weight_type = """weight_v"""
            elif "bias" in name:
                weight_type = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = """weight"""
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def _UpperCAmelCase ( fairseq_model , hf_model , is_headless ):
    """Copy every fairseq weight into the HF model, logging any leftovers.

    Bug fix: locals were collapsed into one name in the obfuscated original.
    NOTE(review): ``is_headless`` is accepted but unused, matching the
    original body; ``hf_model.wavaveca`` is the (obfuscated) backbone
    attribute name — confirm against the model class.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # conv feature-extractor weights have their own loader
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one conv-feature-extractor tensor into the HF feature extractor.

    Bug fix: the obfuscated original declared five identically-named
    parameters (a SyntaxError) and collapsed all locals into one name.
    Relies on module global ``logger``.
    """
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )   # conv layer index
    type_id = int(items[1] )    # 0 = conv weight/bias, 2 = layer norm

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def _UpperCAmelCase ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    """Convert a fairseq wav2vec2 checkpoint to a HF model and save it.

    Reconstructed from the obfuscated original whose locals were all collapsed
    into a single name. Supports pretraining, CTC fine-tuned and
    sequence-classification checkpoints.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        # build the label mapping from the supplied dict file
        id2label = read_txt_into_dict(dict_path )
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = config.feat_extract_norm == """layer"""
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )

    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )

    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Bug fix: the obfuscated original bound the parser/args to a throwaway
    # name and then read undefined `parser`/`args`/`is_finetuned`, and called
    # the undefined `convert_wavaveca_checkpoint` (the converter def above is
    # bound to `_UpperCAmelCase` at this point in the module).
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    _UpperCAmelCase(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 644
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCAmelCase : Dict = "CompVis/stable-diffusion-v1-1"
__lowerCAmelCase : int = "CompVis/stable-diffusion-v1-2"
__lowerCAmelCase : int = "CompVis/stable-diffusion-v1-3"
__lowerCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-4"
class a_ ( __UpperCamelCase ):
    """Pipeline that runs the same prompt through Stable Diffusion v1.1–v1.4
    and returns the first image from each, for side-by-side comparison.

    NOTE(review): the obfuscation collapsed distinct identifiers — all sub-
    pipeline attributes became `pipea`, all text-to-image methods became
    `_SCREAMING_SNAKE_CASE` (later defs shadow earlier ones), and the final
    method calls `self.textaimg_sda_a`, which no longer exists. This class
    cannot run as written; left byte-identical pending a de-obfuscation pass.
    """

    def __init__( self : List[Any] , snake_case__ : AutoencoderKL , snake_case__ : CLIPTextModel , snake_case__ : CLIPTokenizer , snake_case__ : UNetaDConditionModel , snake_case__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case__ : StableDiffusionSafetyChecker , snake_case__ : CLIPImageProcessor , snake_case__ : bool = True , ):
        # NOTE(review): `_init_` is not `__init__` — the base class is never
        # initialized; presumably a typo for `super().__init__()`.
        super()._init_()
        lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ )
        lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ )
        lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(snake_case__ )
        lowerCAmelCase__ = StableDiffusionPipeline(
            vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , requires_safety_checker=snake_case__ , )
        # NOTE(review): duplicate keyword argument `pipelinea` four times is a
        # SyntaxError; originally these were pipeline1..pipeline4.
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )

    @property
    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        # expose all registered (non-private) config entries as attributes
        return {k: getattr(self , snake_case__ ) for k in self.config.keys() if not k.startswith("""_""" )}

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Optional[Union[str, int]] = "auto" ):
        # enable sliced attention on the (last) unet to trade speed for memory
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCAmelCase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case__ )

    def _SCREAMING_SNAKE_CASE ( self : int ):
        # disable slicing by passing None through enable_attention_slicing
        self.enable_attention_slicing(snake_case__ )

    # The four methods below delegate the full txt2img argument set to one of
    # the v1.x sub-pipelines (all collapsed to `pipea` by the obfuscation).
    @torch.no_grad()
    def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : List[Any] , ):
        return self.pipea(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

    @torch.no_grad()
    def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : Tuple , ):
        return self.pipea(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

    @torch.no_grad()
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : Optional[Any] , ):
        return self.pipea(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

    @torch.no_grad()
    def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : str , ):
        return self.pipea(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

    @torch.no_grad()
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, List[str]] , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 50 , snake_case__ : float = 7.5 , snake_case__ : Optional[Union[str, List[str]]] = None , snake_case__ : Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case__ : int = 1 , **snake_case__ : Optional[Any] , ):
        # Run the same prompt through all four checkpoints and bundle the
        # first image of each result. NOTE(review): `textaimg_sda_a` is an
        # obfuscated method name that is never defined on this class.
        lowerCAmelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(snake_case__ )

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )

        # Get first result from Stable Diffusion Checkpoint v1.1
        lowerCAmelCase__ = self.textaimg_sda_a(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

        # Get first result from Stable Diffusion Checkpoint v1.2
        lowerCAmelCase__ = self.textaimg_sda_a(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

        # Get first result from Stable Diffusion Checkpoint v1.3
        lowerCAmelCase__ = self.textaimg_sda_a(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

        # Get first result from Stable Diffusion Checkpoint v1.4
        lowerCAmelCase__ = self.textaimg_sda_a(
            prompt=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , **snake_case__ , )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 644
| 1
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def a_ ( __UpperCAmelCase ) -> None:
    """Seed every RNG used during training for reproducibility.

    Seeds Python's ``random`` module, NumPy's global generator, and torch
    (CPU and all CUDA devices) with the same integer seed.

    Args:
        __UpperCAmelCase: the integer seed (parameter name kept for
            backward compatibility with existing keyword callers).

    Returns:
        None.  (The original ``-> Dict`` annotation was wrong: the function
        only performs side effects.)
    """
    seed = __UpperCAmelCase
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Safe to call even when CUDA is not available (it is a no-op then).
    torch.cuda.manual_seed_all(seed)
class a_:
    """Exponential Moving Average (EMA) of a set of model parameters.

    Maintains ``shadow_params`` that track the model parameters with decay
    ``decay``, optionally warmed up via ``(1 - (1 + step/inv_gamma)**-power)``.

    NOTE(review): the original block was machine-mangled — every ``def``
    reused one parameter name (a SyntaxError) and each ``self.attr = ...``
    assignment was collapsed into a local named ``snake_case``, so no state
    was ever stored.  Parameter/attribute names below are reconstructed from
    the attribute reads in the later methods (``self.shadow_params``,
    ``self.decay``, ...), which match the upstream diffusers ``EMAModel``.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """Create an EMA tracker over ``parameters``.

        Args:
            parameters: parameters to track (a ``torch.nn.Module`` is accepted
                but deprecated).
            decay: maximum decay factor.
            min_decay: minimum decay factor.
            update_after_step: number of optimizer steps before EMA starts.
            use_ema_warmup: ramp the decay up instead of using the fixed
                ``(1 + step) / (10 + step)`` schedule.
            inv_gamma / power: warm-up schedule hyper-parameters.
            model_cls / model_config: used by ``from_pretrained`` /
                ``save_pretrained``.
            **kwargs: deprecated aliases (``max_value``, ``min_value``,
                ``device``).
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # Kept for backwards compatibility with the old nn.Module API.
            use_ema_warmup = True

        if kwargs.get('max_value', None) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False)
            decay = kwargs['max_value']

        if kwargs.get('min_value', None) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False)
            min_decay = kwargs['min_value']

        parameters = list(parameters)
        # Shadow copies of the tracked parameters (detached from the graph).
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get('device', None) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device', '1.0.0', deprecation_message, standard_warn=False)
            self.to(device=kwargs['device'])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "a_":
        """Load a model checkpoint and wrap its parameters in a fresh EMA tracker."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Materialize the EMA weights into a fresh model instance and save it."""
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')
        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # Shadow tensors are written as model weights, not as config entries.
        state_dict.pop('shadow_params', None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the given optimization step."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # Make sure decay is not smaller than min_decay.
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards ``parameters`` by one EMA step."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)
        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Gather the (possibly partitioned) parameter before reading it.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    # Non-trainable parameters are copied verbatim.
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the shadow parameters into ``parameters`` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to ``device`` (and cast floats to ``dtype``)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return a serializable dict of the EMA hyper-parameters and shadow tensors."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Snapshot ``parameters`` (on CPU) so they can be ``restore()``d later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the last ``store()``d snapshot back into ``parameters``."""
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`')
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise: release the snapshot once it has been applied.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Restore EMA state produced by ``state_dict()``, validating each field."""
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get('decay', self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        self.min_decay = state_dict.get('min_decay', self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError('Invalid min_decay')

        self.optimization_step = state_dict.get('optimization_step', self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError('Invalid optimization_step')

        self.update_after_step = state_dict.get('update_after_step', self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError('Invalid update_after_step')

        self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError('Invalid use_ema_warmup')

        self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError('Invalid inv_gamma')

        self.power = state_dict.get('power', self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError('Invalid power')

        shadow_params = state_dict.get('shadow_params', None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError('shadow_params must be a list')
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError('shadow_params must all be Tensors')
| 347
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case , unittest.TestCase ):
    """GPT-2 tokenizer test-suite (slow Python and fast Rust implementations).

    NOTE(review): identifiers in this block look machine-mangled and the code
    is left byte-identical (only comments/docstrings added, indentation
    restored): every test method is named ``UpperCamelCase`` so later defs
    rebind earlier ones and only the last one is collected; the mixin base
    ``snake_case`` is undefined here (presumably TokenizerTesterMixin — TODO
    confirm); the placeholder ``a_`` appears both as parameter and free
    variable; and one signature below reuses ``a_`` for ``*args`` and
    ``**kwargs``, which is a SyntaxError.
    """

    UpperCAmelCase : Any = GPTaTokenizer
    UpperCAmelCase : Union[str, Any] = GPTaTokenizerFast
    UpperCAmelCase : Optional[int] = True
    UpperCAmelCase : Union[str, Any] = {"""add_prefix_space""": True}
    UpperCAmelCase : str = False

    def UpperCamelCase ( self : List[str] ) -> List[Any]:
        # setUp: writes a tiny BPE vocab + merges pair into tmpdirname.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case: Union[str, Any] =[
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        snake_case: Optional[int] =dict(zip(a_ , range(len(a_ ) ) ) )
        snake_case: Optional[Any] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        snake_case: Dict ={'unk_token': '<unk>'}
        snake_case: List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        snake_case: Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a_ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a_ ) )

    def UpperCamelCase ( self : Optional[Any] , **a_ : List[str] ) -> int:
        # Factory for the slow (Python) tokenizer built from the tmp vocab files.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **a_ )

    def UpperCamelCase ( self : Dict , **a_ : Any ) -> Optional[Any]:
        # Factory for the fast (Rust) tokenizer built from the same files.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a_ )

    def UpperCamelCase ( self : List[str] , a_ : List[Any] ) -> Union[str, Any]:
        # get_input_output_texts equivalent: identity pair used by shared tests.
        snake_case: Any ='lower newer'
        snake_case: Tuple ='lower newer'
        return input_text, output_text

    def UpperCamelCase ( self : int ) -> Any:
        # Full-tokenizer round trip: tokens and token-ids for 'lower newer'.
        snake_case: Any =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case: int ='lower newer'
        snake_case: str =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        snake_case: Optional[Any] =tokenizer.tokenize(a_ , add_prefix_space=a_ )
        self.assertListEqual(a_ , a_ )
        snake_case: Optional[int] =tokens + [tokenizer.unk_token]
        snake_case: List[Any] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )

    def UpperCamelCase ( self : Any ) -> Optional[int]:
        # Slow vs fast tokenizer parity checks.
        if not self.test_rust_tokenizer:
            return
        snake_case: Tuple =self.get_tokenizer()
        snake_case: List[Any] =self.get_rust_tokenizer(add_prefix_space=a_ )
        snake_case: Any ='lower newer'
        # Testing tokenization
        snake_case: Optional[Any] =tokenizer.tokenize(a_ , add_prefix_space=a_ )
        snake_case: Union[str, Any] =rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , a_ )
        # Testing conversion to ids without special tokens
        snake_case: Dict =tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
        snake_case: Any =rust_tokenizer.encode(a_ , add_special_tokens=a_ )
        self.assertListEqual(a_ , a_ )
        # Testing conversion to ids with special tokens
        snake_case: str =self.get_rust_tokenizer(add_prefix_space=a_ )
        snake_case: Dict =tokenizer.encode(a_ , add_prefix_space=a_ )
        snake_case: Dict =rust_tokenizer.encode(a_ )
        self.assertListEqual(a_ , a_ )
        # Testing the unknown token
        snake_case: List[str] =tokens + [rust_tokenizer.unk_token]
        snake_case: Optional[int] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )

    # NOTE(review): `*a_ , **a_` reuses one name for both varargs and kwargs —
    # this is a SyntaxError ("duplicate argument 'a_'"); preserved as-is.
    def UpperCamelCase ( self : List[str] , *a_ : Tuple , **a_ : Tuple ) -> Any:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def UpperCamelCase ( self : Dict , a_ : List[Any]=1_5 ) -> str:
        # Padding without a pad token must raise for every encode entry point.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case: Dict =self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
                # Simple input
                snake_case: List[str] ='This is a simple input'
                snake_case: Optional[int] =['This is a simple input 1', 'This is a simple input 2']
                snake_case: Dict =('This is a simple input', 'This is a pair')
                snake_case: Union[str, Any] =[
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='max_length' )
                # Simple input
                self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='max_length' )
                # Simple input
                self.assertRaises(
                    a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='max_length' , )
                # Pair input
                self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='max_length' )
                # Pair input
                self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='max_length' )
                # Pair input
                self.assertRaises(
                    a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='max_length' , )

    def UpperCamelCase ( self : Tuple ) -> List[Any]:
        # Padding behavior once an explicit pad token is configured.
        snake_case: Optional[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
        # Simple input
        snake_case: List[Any] ='This is a simple input'
        snake_case: Tuple =['This is a simple input looooooooong', 'This is a simple input']
        snake_case: Union[str, Any] =('This is a simple input', 'This is a pair')
        snake_case: List[Any] =[
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        snake_case: Any =tokenizer.pad_token_id
        snake_case: List[str] =tokenizer(a_ , padding='max_length' , max_length=3_0 , return_tensors='np' )
        snake_case: Dict =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='np' )
        snake_case: Tuple =tokenizer(*a_ , padding='max_length' , max_length=6_0 , return_tensors='np' )
        snake_case: Dict =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='np' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
        self.assertTrue(pad_token_id in out_s['input_ids'] )
        self.assertTrue(0 in out_s['attention_mask'] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
        self.assertFalse(0 in out_sa['attention_mask'][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
        self.assertTrue(0 in out_sa['attention_mask'][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
        self.assertTrue(pad_token_id in out_p['input_ids'] )
        self.assertTrue(0 in out_p['attention_mask'] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
        self.assertFalse(0 in out_pa['attention_mask'][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
        self.assertTrue(0 in out_pa['attention_mask'][1] )

    def UpperCamelCase ( self : str ) -> Optional[Any]:
        # BOS-token insertion: every encoded sequence must start with the bos id.
        snake_case: Tuple ='$$$'
        snake_case: Any =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
        snake_case: Optional[Any] ='This is a simple input'
        snake_case: Any =['This is a simple input 1', 'This is a simple input 2']
        snake_case: Any =tokenizer.bos_token_id
        snake_case: Dict =tokenizer(a_ )
        snake_case: int =tokenizer(a_ )
        self.assertEqual(out_s.input_ids[0] , a_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        snake_case: Optional[int] =tokenizer.decode(out_s.input_ids )
        snake_case: Optional[Any] =tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , a_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def UpperCamelCase ( self : Optional[int] ) -> Tuple:
        # Intentionally skipped in the original suite.
        pass

    def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        # Special-tokens-mask consistency on sentence pairs with bos insertion.
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        snake_case: int =[self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )]
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                snake_case: List[str] ='Encode this.'
                snake_case: List[Any] ='This one too please.'
                snake_case: Union[str, Any] =tokenizer.encode(a_ , add_special_tokens=a_ )
                encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ )
                snake_case: Any =tokenizer.encode_plus(
                    a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , )
                snake_case: Dict =encoded_sequence_dict['input_ids']
                snake_case: str =encoded_sequence_dict['special_tokens_mask']
                self.assertEqual(len(a_ ) , len(a_ ) )
                snake_case: Dict =[
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ )
                ]
                snake_case: int =[x for x in filtered_sequence if x is not None]
                self.assertEqual(a_ , a_ )
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Regression tests for the OPT (facebook/opt-350m) tokenizer on the Hub.

    NOTE(review): mangled code left byte-identical (comments only) — all three
    test methods share the name ``UpperCamelCase``, so only the last definition
    is collected; ``a_`` / ``snake_case`` are placeholder names; these tests
    require network access to huggingface.co.
    """

    def UpperCamelCase ( self : Dict ) -> Optional[int]:
        # Converted-from-slow tokenizer must survive a save/reload round trip
        # with identical token ids.
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        snake_case: Optional[int] =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a_ )
        snake_case: List[Any] ='A photo of a cat'
        snake_case: List[Any] =tokenizer.encode(
            a_ , )
        self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
        tokenizer.save_pretrained('test_opt' )
        snake_case: Union[str, Any] =AutoTokenizer.from_pretrained('./test_opt' )
        snake_case: Union[str, Any] =tokenizer.encode(
            a_ , )
        self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )

    def UpperCamelCase ( self : Any ) -> Tuple:
        # Slow tokenizer must produce the same ids as the fast one above.
        snake_case: Optional[Any] =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=a_ )
        snake_case: List[str] ='A photo of a cat'
        snake_case: Optional[int] =tokenizer.encode(
            a_ , )
        # Same as above
        self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )

    @unittest.skip('This test is failing because of a bug in the fast tokenizer' )
    def UpperCamelCase ( self : Union[str, Any] ) -> Any:
        # Overridden bos token must persist through save_pretrained/reload.
        snake_case: Dict =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a_ )
        snake_case: Dict ='bos'
        snake_case: Union[str, Any] =tokenizer.get_vocab()['bos']
        snake_case: Tuple ='A photo of a cat'
        snake_case: str =tokenizer.encode(
            a_ , )
        # We changed the bos token
        self.assertEqual(a_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
        tokenizer.save_pretrained('./tok' )
        snake_case: List[str] =AutoTokenizer.from_pretrained('./tok' )
        self.assertTrue(tokenizer.is_fast )
        snake_case: Dict =tokenizer.encode(
            a_ , )
        self.assertEqual(a_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 347
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL.
# NOTE(review): this rebinds the same name used for the logger above, so the
# logger reference is lost — upstream uses two distinct names (`logger` and
# `MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP`); confirm before relying on either.
UpperCamelCase : Optional[int] = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
    """Configuration for an MGP-STR scene-text-recognition model.

    Stores the vision-transformer hyper-parameters plus the label-space sizes
    of the three recognition heads (character / BPE / wordpiece).

    NOTE(review): the original ``__init__`` reused the single name
    ``_lowerCAmelCase`` for every parameter — a SyntaxError ("duplicate
    argument").  Parameter names below are reconstructed from the attribute
    assignments; defaults are unchanged.
    """

    # NOTE(review): corresponds to `model_type` in upstream transformers.
    _UpperCamelCase = 'mgp-str'

    def __init__(
        self,
        image_size=[32, 1_28],  # (height, width) of input images; mutable default kept for compatibility
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=5_02_57,
        num_wordpiece_labels=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1E-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Assignments kept in the original order.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 50
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
# NOTE(review): the mangled original bound this dict (and the final _LazyModule
# instance) to `UpperCamelCase`, so the `_import_structure` referenced on the
# last line was undefined — a NameError at import time.  Restored to the
# standard transformers lazy-module boilerplate.
_import_structure = {
    'configuration_xmod': [
        'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XmodConfig',
        'XmodOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: only the configuration symbols are exposed.
    pass
else:
    _import_structure['modeling_xmod'] = [
        'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XmodForCausalLM',
        'XmodForMaskedLM',
        'XmodForMultipleChoice',
        'XmodForQuestionAnswering',
        'XmodForSequenceClassification',
        'XmodForTokenClassification',
        'XmodModel',
        'XmodPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
| 1
|
def _A ( lowerCamelCase ):
a__ : Optional[int] = [[0 for _ in range(lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
a__ : Tuple = 1
for n in range(m + 1 ):
for k in range(1 , lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
    # CLI driver: take the number from argv if given, otherwise prompt.
    # NOTE(review): the original called the undefined names `partition` and
    # `n`; fixed to bind `n` and call the function defined above (`_A`).
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("""Enter a number: """).strip())
            print(_A(n))
        except ValueError:
            print("""Please enter a number.""")
    else:
        try:
            n = int(sys.argv[1])
            print(_A(n))
        except ValueError:
            print("""Please pass a number.""")
| 705
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Offline-mode integration tests: transformers must work from the local
    cache when the network is unavailable or TRANSFORMERS_OFFLINE=1.

    Each test builds a small Python script (``load`` + ``run`` [+ ``mock`` that
    breaks ``socket.socket``]) and runs it in a subprocess.

    NOTE(review): mangled code left byte-identical (comments only) — all five
    tests share the name ``_snake_case`` so only the last definition is
    collected; the bare ``a__ : Tuple = "1"`` assignments sit where upstream
    sets ``env["TRANSFORMERS_OFFLINE"] = "1"``, so the env var is never
    actually set here; base ``_UpperCamelCase`` is presumably TestCasePlus —
    TODO confirm.
    """

    @require_torch
    def _snake_case ( self ) -> str:
        """Cached model + pipeline must load with networking mocked out."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Cached load must also survive flaky-internet (socket.error) mocking."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoints must load from cache in offline mode."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> Tuple:
        """`pipeline` without an explicit task must fail clearly in offline mode."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )

    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """trust_remote_code models must load from cache with TRANSFORMERS_OFFLINE=1."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCamelCase_ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for the public DistilBERT variants.
# NOTE(review): this rebinds the same name used for the logger above, so the
# logger is no longer reachable; upstream uses two distinct names (`logger` and
# `DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`) — confirm before relying on either.
lowerCamelCase_ = {
    '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
    '''distilbert-base-uncased-distilled-squad''': (
        '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
    ),
    '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
    '''distilbert-base-cased-distilled-squad''': (
        '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
    ),
    '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
    '''distilbert-base-multilingual-cased''': (
        '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
    ),
    '''distilbert-base-uncased-finetuned-sst-2-english''': (
        '''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
    ),
}
class UpperCamelCase_ (__A ):
    """DistilBERT model configuration.

    NOTE(review): the original ``__init__`` reused the single name
    ``lowerCAmelCase_`` for every parameter (a SyntaxError: duplicate
    argument) and declared BOTH class attributes as ``__magic_name__`` so the
    attribute map silently overwrote the model type.  Names below are
    reconstructed from the attribute assignments and the base-class contract
    (``model_type`` / ``attribute_map``); defaults are unchanged.
    """

    model_type = '''distilbert'''
    # Maps canonical config names to DistilBERT's own attribute names.
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.0_2,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        # Base-class init consumes remaining kwargs and records pad_token_id.
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class UpperCamelCase_ (__A ):
    """ONNX export configuration for DistilBERT.

    NOTE(review): the property below was mangled to ``_SCREAMING_SNAKE_CASE``;
    the OnnxConfig contract requires it to be named ``inputs``, so the name is
    restored.  This class also shares its (mangled) name with the config class
    above it in this module, which the second definition shadows.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic input axes for ONNX export: multiple-choice tasks add a
        `choice` axis between batch and sequence."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
# Map of pretrained Informer checkpoints to their hosted config files.
# NOTE(review): this rebinding clobbers the logger created on the previous line —
# upstream names these `logger` and `INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowerCamelCase_ = {
    '''huggingface/informer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase_ (__A ):
    # Configuration class for an Informer time-series model.  `__A` is the
    # project's pretrained-config base class (defined elsewhere in this module).
    __magic_name__ = '''informer'''
    __magic_name__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        """Store the Informer hyper-parameters on the config instance.

        The previous signature reused one name for every parameter (a
        SyntaxError) and bound the values to throwaway locals instead of
        ``self``; the names here are recovered from the attribute reads in
        this body and the `_number_of_features` property below.

        Raises:
            ValueError: if `cardinality` or `embedding_dimension` does not have
                one entry per static categorical feature.
        """
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the context window to the prediction window when unset.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the (cardinality + 1), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Renamed back from a placeholder: `__init__` reads
        # `self._number_of_features` when computing `feature_size`.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 95
| 1
|
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr

    # `sklearn.metrics` has no `fa_score`; the intended function is `f1_score`.
    # The alias keeps the `fa_score` name used by the metric helpers below.
    from sklearn.metrics import f1_score as fa_score
    from sklearn.metrics import matthews_corrcoef

# Deprecation message emitted by every metric helper in this module.
__UpperCAmelCase = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def A_(preds, labels):
    """Deprecated GLUE helper: fraction of predictions equal to the labels.

    `preds` and `labels` are array-likes supporting `==` broadcasting
    (e.g. numpy arrays).  Returns the mean accuracy as a float.
    """
    # Fix: the warning previously received (preds, labels), which raises a
    # TypeError because the second argument must be a Warning category.
    warnings.warn(__UpperCAmelCase, FutureWarning)
    requires_backends(A_, 'sklearn')
    return (preds == labels).mean()
def A_(preds, labels):
    """Deprecated GLUE helper: accuracy, F1, and their mean.

    Returns a dict with keys ``acc``, ``f1`` and ``acc_and_f1``.
    """
    warnings.warn(__UpperCAmelCase, FutureWarning)
    requires_backends(A_, 'sklearn')
    # Accuracy is computed inline; the helper name the original called does
    # not exist in this module.
    acc = (preds == labels).mean()
    fa = fa_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def A_(preds, labels):
    """Deprecated STS-B helper: Pearson and Spearman correlations, and their mean.

    Returns a dict with keys ``pearson``, ``spearmanr`` and ``corr``.
    """
    warnings.warn(__UpperCAmelCase, FutureWarning)
    requires_backends(A_, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def _simple_accuracy(preds, labels):
    # Fraction of exact matches between predictions and labels.
    return (preds == labels).mean()


def _acc_and_fa(preds, labels):
    # Accuracy, F1 and their mean, as a dict.
    acc = _simple_accuracy(preds, labels)
    fa = fa_score(y_true=labels, y_pred=preds)
    return {"acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2}


def _pearson_and_spearman(preds, labels):
    # Pearson/Spearman correlations and their mean, as a dict.
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {"pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2}


def A_(task_name, preds, labels):
    """Deprecated: compute the GLUE metric(s) for `task_name`.

    The metric helpers are defined privately above because the original
    call targets do not exist under their upstream names in this module.

    Raises:
        ValueError: if `preds` and `labels` have different lengths.
        KeyError: if `task_name` is not a known GLUE task.
    """
    warnings.warn(__UpperCAmelCase, FutureWarning)
    requires_backends(A_, 'sklearn')
    # Validation via raise rather than `assert` (asserts vanish under -O);
    # this also matches the sibling XNLI helper below.
    if len(preds) != len(labels):
        raise ValueError(f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}''')
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": _simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return _acc_and_fa(preds, labels)
    elif task_name == "sts-b":
        return _pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return _acc_and_fa(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": _simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": _simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": _simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": _simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": _simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": _simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def A_(task_name, preds, labels):
    """Deprecated: compute the XNLI accuracy metric.

    Only ``task_name == "xnli"`` is supported.

    Raises:
        ValueError: if `preds` and `labels` have different lengths.
        KeyError: for any other task name.
    """
    warnings.warn(__UpperCAmelCase, FutureWarning)
    requires_backends(A_, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(f'''Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}''')
    if task_name == "xnli":
        # Accuracy computed inline: the `simple_accuracy` helper the upstream
        # version called does not exist under that name in this module.
        return {"acc": (preds == labels).mean()}
    else:
        raise KeyError(task_name)
| 259
|
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


# Root of the repository checkout; `utils/` is appended to sys.path so the
# `check_copies` script can be imported below.  Fix: both constants were bound
# to the same placeholder name, leaving `git_repo_path` (next line) and
# `REFERENCE_CODE` (used by the test class) unresolved, with the second
# assignment clobbering the first.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = "    def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n"
class a_( unittest.TestCase ):
    """Tests for the `check_copies` utility (``# Copied from`` consistency checks).

    NOTE(review): this block appears machine-degraded — several locals are bound
    to ``SCREAMING_SNAKE_CASE`` but read back under their original names
    (``comment``, ``class_name``, ``REFERENCE_CODE`` …), every method shares the
    mangled name ``__UpperCamelCase`` while call sites use the upstream method
    names, ``setUp`` reads ``self.transformer_dir`` before it is ever assigned,
    and ``black.TargetVersion.PYaa`` is not a valid member.  Compare against the
    upstream ``tests/repo_utils/test_check_copies.py`` before trusting details.
    """
    def __UpperCamelCase ( self : Optional[int]) -> int:
        """Create a temporary `transformers` tree and copy `modeling_bert.py` into it."""
        # NOTE(review): `self.transformer_dir` is used on the next line before
        # being assigned; upstream stores the tempdir on the instance first.
        SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/'))
        SCREAMING_SNAKE_CASE = self.transformer_dir
        shutil.copy(
            os.path.join(lowerCAmelCase__ , 'src/transformers/models/bert/modeling_bert.py') , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py') , )
    def __UpperCamelCase ( self : Optional[int]) -> Tuple:
        """Tear down: restore the module path constant and delete the temporary tree."""
        SCREAMING_SNAKE_CASE = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def __UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None) -> Optional[int]:
        """Black-format `class_code` under `comment` and assert copy consistency.

        With `overwrite_result` set, runs the checker in overwrite mode and
        compares the rewritten file against the expected text.
        """
        SCREAMING_SNAKE_CASE = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            SCREAMING_SNAKE_CASE = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # NOTE(review): `black.TargetVersion.PYaa` does not exist (upstream uses PY35).
        SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9)
        SCREAMING_SNAKE_CASE = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE = os.path.join(self.transformer_dir , 'new_code.py')
        with open(lowerCAmelCase__ , 'w' , newline='\n') as f:
            f.write(lowerCAmelCase__)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase__)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase__)
            with open(lowerCAmelCase__ , 'r') as f:
                self.assertTrue(f.read() , lowerCAmelCase__)
    def __UpperCamelCase ( self : List[Any]) -> Any:
        """`find_code_in_transformers` should locate BertLMPredictionHead's source."""
        SCREAMING_SNAKE_CASE = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
    def __UpperCamelCase ( self : Optional[int]) -> Optional[Any]:
        """Exercise copy-consistency: base, trailing-newline, rename, long name, overwrite."""
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowerCAmelCase__ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowerCAmelCase__) , )
        # Copy consistency with a really long name
        SCREAMING_SNAKE_CASE = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , lowerCAmelCase__ , lowerCAmelCase__) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowerCAmelCase__ , overwrite_result=re.sub('Bert' , 'TestModel' , lowerCAmelCase__) , )
    def __UpperCamelCase ( self : Optional[int]) -> List[str]:
        """`convert_to_localized_md` should sync the zh-hans README model list."""
        SCREAMING_SNAKE_CASE = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        # English source list containing models missing from the localized list.
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        # Localized list missing the DistilBERT and ELECTRA entries.
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        # Expected localized list after conversion.
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
        self.assertFalse(lowerCAmelCase__)
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        # Localized entry whose model link is stale (points at `main/`).
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        SCREAMING_SNAKE_CASE = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
| 259
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase__ (
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard input dict for the Flax Blenderbot tests.

    Any mask not supplied is derived from the token ids (positions equal to
    `config.pad_token_id` are masked out) or filled with all-ones head masks.
    The previous signature reused one name for every parameter (a SyntaxError);
    names are recovered from the body's reads.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    # The head masks are materialized for callers that need them but are not
    # part of the returned dict.
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # Fix: previously returned `attention_mask` here, leaving the decoder
        # mask computed above unused.
        "decoder_attention_mask": decoder_attention_mask,
    }
class _UpperCamelCase:
    """Builds a small Flax Blenderbot config/inputs and checks cached decoding.

    NOTE(review): this block appears machine-degraded — every parameter of each
    method shares the name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError in Python)
    and locals are bound to ``__a`` but read back under their upstream names
    (``decoder_input_ids``, ``max_decoder_length``, ``outputs_cache`` …).
    Compare with the upstream FlaxBlenderbotModelTester before trusting details.
    """
    def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE__ : str=1_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , ):
        """Record the tester's model hyper-parameters (read here under upstream names)."""
        __a : Dict = parent
        __a : Optional[int] = batch_size
        __a : int = seq_length
        __a : Any = is_training
        __a : Tuple = use_labels
        __a : Dict = vocab_size
        __a : List[str] = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Optional[Any] = intermediate_size
        __a : List[str] = hidden_act
        __a : Tuple = hidden_dropout_prob
        __a : Dict = attention_probs_dropout_prob
        __a : Dict = max_position_embeddings
        __a : str = eos_token_id
        __a : str = pad_token_id
        __a : Optional[Any] = bos_token_id
        __a : Union[str, Any] = initializer_range
    def __lowerCAmelCase ( self : str ):
        """Build a tiny BlenderbotConfig plus random encoder/decoder inputs."""
        __a : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        __a : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        __a : Optional[Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
        __a : str = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE__ , )
        __a : Optional[int] = prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        return config, inputs_dict
    def __lowerCAmelCase ( self : Tuple ):
        """Thin wrapper: return (config, inputs_dict) for the common test mixin."""
        __a , __a : Tuple = self.prepare_config_and_inputs()
        return config, inputs_dict
    def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
        """Incremental decode with init_cache must match a full forward pass."""
        __a : Union[str, Any] = 2_0
        __a : int = model_class_name(SCREAMING_SNAKE_CASE__ )
        __a : str = model.encode(inputs_dict['input_ids'] )
        __a , __a : Tuple = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        __a : List[str] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        __a : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        __a : Dict = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        __a : Dict = model.decode(
            decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
        __a : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        __a : Dict = model.decode(
            decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
        __a : Union[str, Any] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # Compare the last-step logits of cached vs. uncached decoding.
        __a : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
        """Same cache-consistency check, with an explicit padded decoder attention mask."""
        __a : Optional[Any] = 2_0
        __a : List[str] = model_class_name(SCREAMING_SNAKE_CASE__ )
        __a : Optional[Any] = model.encode(inputs_dict['input_ids'] )
        __a , __a : Any = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        # Extend the decoder mask with zeros up to the maximum decode length.
        __a : Any = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        __a : Dict = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        __a : Optional[int] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        __a : List[str] = model.decode(
            decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
        __a : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        __a : int = model.decode(
            decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
        __a : Dict = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ )
        __a : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class _UpperCamelCase( unittest.TestCase ):
    """Standalone head/shift tests for Flax Blenderbot on tiny hand-written inputs.

    NOTE(review): machine-degraded — parameters and locals are bound under
    placeholder names (``__a``) but read back under upstream names
    (``input_ids``, ``config``, ``lm_model`` …), and the class attribute below
    is read as ``self.vocab_size``; confirm against the upstream test file.
    """
    # Vocabulary size shared by all tests in this class (read as `self.vocab_size`).
    __SCREAMING_SNAKE_CASE : List[Any] = 99
    def __lowerCAmelCase ( self : List[str] ):
        """Return a tiny (config, input_ids, batch_size) fixture with one padded row."""
        __a : Dict = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1],  # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.intaa , )
        __a : Any = input_ids.shape[0]
        __a : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def __lowerCAmelCase ( self : int ):
        """LM forward pass must produce logits of shape (batch, seq, vocab)."""
        __a , __a , __a : List[Any] = self._get_config_and_data()
        __a : str = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
        __a : List[Any] = lm_model(input_ids=SCREAMING_SNAKE_CASE__ )
        __a : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """LM forward with explicit decoder input ids: logits follow the decoder shape."""
        __a : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        __a : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
        __a : Tuple = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
        __a : Optional[Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
        __a : Optional[int] = lm_model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
        __a : List[Any] = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """shift_tokens_right must shift pads, keep shape, and start rows with decoder_start (2)."""
        __a : int = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
        __a : Optional[int] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
        __a : List[str] = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum()
        __a : Any = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _UpperCamelCase( __lowerCamelCase , unittest.TestCase , __lowerCamelCase ):
    """Model-level tests for Flax Blenderbot via the common Flax tester mixins.

    NOTE(review): machine-degraded — the mixin bases are placeholders
    (``__lowerCamelCase``), and locals bound to ``__a`` are read back under
    upstream names; confirm details against the upstream test file.
    """
    __SCREAMING_SNAKE_CASE : int = True
    # Model classes exercised by the common mixin tests.
    __SCREAMING_SNAKE_CASE : Any = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE : str = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def __lowerCAmelCase ( self : Tuple ):
        """Set up the shared model tester fixture."""
        __a : List[Any] = FlaxBlenderbotModelTester(self )
    def __lowerCAmelCase ( self : Optional[int] ):
        """Cached decoding must match full decoding for every model class."""
        __a , __a : Any = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : List[Any] ):
        """Cached decoding with an explicit attention mask must match full decoding."""
        __a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : List[str] ):
        """`model.encode` must produce identical shapes with and without JIT."""
        __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                __a : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
                @jax.jit
                def encode_jitted(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : List[Any] ):
                    return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
                with self.subTest('JIT Enabled' ):
                    __a : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __a : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
                for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def __lowerCAmelCase ( self : Any ):
        """`model.decode` must produce identical shapes with and without JIT."""
        __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
                __a : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                __a : Optional[int] = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
                    return model.decode(
                        decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , )
                with self.subTest('JIT Enabled' ):
                    __a : List[Any] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __a : str = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
                for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Pretrained checkpoint loads and runs a forward pass on a one-token batch."""
        for model_class_name in self.all_model_classes:
            __a : List[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            __a : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
            __a : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
    @slow
    def __lowerCAmelCase ( self : List[str] ):
        """End-to-end generation with the 3B checkpoint matches the reference reply."""
        __a : int = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
        __a : Dict = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        __a : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=SCREAMING_SNAKE_CASE__ )
        __a : List[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
        __a : Tuple = ['Sam']
        __a : int = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='jax' )
        __a : Any = model.generate(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        __a : Union[str, Any] = 'Sam is a great name. It means "sun" in Gaelic.'
        __a : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        assert generated_txt[0].strip() == tgt_text
| 47
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# Map of checkpoint name -> config URL for the pretrained SEW-D models.
# NOTE(review): both module-level names were mechanically rewritten to
# `SCREAMING_SNAKE_CASE__`, so this dict assignment overwrites the logger
# bound just above — the logger binding is effectively lost to the rest of
# the module. Looks like renaming damage; confirm against the original
# `configuration_sew_d.py` (names were `logger` and
# `SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP`).
SCREAMING_SNAKE_CASE__ = {
    '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCamelCase( PretrainedConfig ):
    """Configuration for a SEW-D model.

    Restored from renaming damage: the original `__init__` had every parameter
    renamed to the same identifier (a SyntaxError via duplicate arguments) and
    all `self.<attr>` targets dropped; parameter names below are recovered from
    the attribute names the body assigns. The base class was `__lowerCamelCase`
    (undefined) — `PretrainedConfig` is the name this module actually imports.
    """

    # Registers this configuration under the "sew-d" model type.
    model_type = '''sew-d'''

    def __init__(
        self ,
        vocab_size=32 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        squeeze_factor=2 ,
        max_position_embeddings=512 ,
        position_buckets=256 ,
        share_att_key=True ,
        relative_attention=True ,
        pos_att_type=("p2c", "c2p") ,
        norm_rel_ebd="layer_norm" ,
        hidden_act="gelu_python" ,
        hidden_dropout=0.1 ,
        activation_dropout=0.1 ,
        attention_dropout=0.1 ,
        feat_proj_dropout=0.0 ,
        final_dropout=0.1 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-7 ,
        feature_layer_norm_eps=1e-5 ,
        feat_extract_norm="group" ,
        feat_extract_activation="gelu" ,
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,
        conv_bias=False ,
        num_conv_pos_embeddings=128 ,
        num_conv_pos_embedding_groups=16 ,
        apply_spec_augment=True ,
        mask_time_prob=0.05 ,
        mask_time_length=10 ,
        mask_time_min_masks=2 ,
        mask_feature_prob=0.0 ,
        mask_feature_length=10 ,
        mask_feature_min_masks=0 ,
        ctc_loss_reduction="mean" ,
        ctc_zero_infinity=False ,
        use_weighted_layer_sum=False ,
        classifier_proj_size=256 ,
        pad_token_id=0 ,
        bos_token_id=1 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # One feature-extractor layer per convolution spec entry.
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            # Message spacing fixed: the concatenated literals previously ran
            # "incorrect.It is required" together.
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def __lowerCAmelCase ( self ):
        """Overall stride of the convolutional feature encoder (product of the
        per-layer strides); upstream this property is `inputs_to_logits_ratio`."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 47
| 1
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# `main()` below logs via the conventional module-level name `logger`, which the
# renamed assignment above no longer provides — bind it explicitly.
logger = lowercase_

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( wav: np.ndarray , max_length: float , sample_rate: int = 1_6000 ):
    """Randomly crop `wav` to at most `max_length` seconds.

    Returns `wav` unchanged when it is already short enough; otherwise returns a
    random contiguous slice of `sample_rate * max_length` samples.

    (Parameter names restored: the original `def` repeated `lowercase_` for all
    three parameters — a SyntaxError — and the body referenced the lost names
    `sample_length` / `random_offset`.)
    """
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    # randint is inclusive on both ends; the -1 keeps the slice strictly inside wav.
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]


# Readable alias: the dataset transforms inside `main()` call `random_subsample`.
random_subsample = SCREAMING_SNAKE_CASE
@dataclass
class __UpperCamelCase:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names and type annotations restored from the help strings: the renamed
    file bound every field to `__A` without an annotation (so none of them were
    actual dataclass fields) and used the undefined `_UpperCAmelCase` as a default.
    """

    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )


# `main()` builds its HfArgumentParser from this conventional name.
DataTrainingArguments = __UpperCamelCase
@dataclass
class __UpperCamelCase:
    """Arguments pertaining to which model/config we are going to fine-tune from.

    Field names, annotations and the `__post_init__` hook restored from the help
    strings and the attributes the validation method reads; the renamed file
    bound every field to `__A` and called the hook `_a` (so it never ran).
    """

    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )

    def __post_init__(self ) -> None:
        """Translate the deprecated `--freeze_feature_extractor` flag into
        `--freeze_feature_encoder`, rejecting contradictory combinations."""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )


# `main()` builds its HfArgumentParser from this conventional name.
ModelArguments = __UpperCamelCase
def SCREAMING_SNAKE_CASE ( ):
    """Fine-tune an audio-classification model.

    Local bindings restored throughout: the renamed file collapsed every
    assignment onto `lowercase`, leaving `model_args`, `raw_datasets`,
    `feature_extractor`, `trainer`, etc. undefined at their use sites
    (`fpaa`→`fp16`, `labelaid`/`idalabel`→`label2id`/`id2label`).
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , model_args , data_args )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to train from scratch.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--audio_column_name` to the correct audio column - one of """
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--label_column_name` to the correct text column - one of """
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch ):
        """Random-crop each training clip, then featurise the batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch

    def val_transforms(batch ):
        """Featurise full validation clips (no random cropping)."""
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["""train"""].features[data_args.label_column_name].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )

    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )

    # Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


# Conventional entry-point alias for the renamed function above.
main = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    main()
| 653
|
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( message_body: str , slack_url: str ) -> None:
    """Post `message_body` to the Slack incoming-webhook `slack_url`.

    Raises ValueError when Slack responds with a non-200 status code.

    (Parameter names restored: the original `def` repeated `lowercase_` for both
    parameters — a SyntaxError — and the body referenced the lost names
    `message_body`, `headers` and `msg`.)
    """
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )


# Readable alias used by the demo call below.
send_slack_message = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( SchedulerCommonTest ):
    """Tests for `DPMSolverSinglestepScheduler`: save/load round-trips, config
    sweeps, and full denoising loops checked against reference means.

    Restored from renaming damage: every method shared the name `_UpperCAmelCase`
    (so later defs shadowed earlier ones and the suite could never run), the base
    class was the undefined `__snake_case`, and most locals were collapsed onto
    `A`, leaving `config`/`kwargs`/`scheduler`/`sample` etc. undefined.
    """

    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self , **kwargs ):
        """Default scheduler config, updated with any keyword overrides."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf" ),
            "variance_type": None,
        }
        config.update(**kwargs )
        return config

    def check_over_configs(self , time_step=0 , **config ):
        """Scheduler state must survive a save_config/from_pretrained round-trip."""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample

                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self ):
        # NOTE(review): deliberately bypassed here — presumably the common-test
        # version does not apply to this multi-order solver; confirm the
        # original method name against the upstream test file.
        pass

    def check_over_forward(self , time_step=0 , **forward_kwargs ):
        """Round-trip check for a single `step` call after save/load."""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self , scheduler=None , **config ):
        """Run a 10-step denoising loop and return the final sample."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )

        # NOTE(review): this unconditionally rebuilds the scheduler, discarding
        # any instance passed via `scheduler=` — mirrors the source as written;
        # looks suspicious, confirm against upstream before changing.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        return sample

    def test_full_uneven_loop(self ):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2574 ) < 1e-3

    def test_timesteps(self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_switch(self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )

        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1e-3

    def test_thresholding(self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="dpmsolver++" , solver_order=order , solver_type=solver_type , )

    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_solver_order_and_type(self ):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"

    def test_lower_order_final(self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )

    def test_lambda_min_clipped(self ):
        self.check_over_configs(lambda_min_clipped=-float("inf" ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )

    def test_variance_type(self ):
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type="learned_range" )

    def test_inference_steps(self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )

    def test_full_loop_no_noise(self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1e-3

    def test_full_loop_with_karras(self ):
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2248 ) < 1e-3

    def test_full_loop_with_v_prediction(self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1453 ) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self ):
        sample = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0649 ) < 1e-3

    def test_fp16_support(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        # `.half()` input must stay float16 all the way through the loop.
        assert sample.dtype == torch.float16
| 641
|
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( UpperCamelCase__: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape worldometers and return a {statistic name: value} mapping for COVID-19.

    (Restored `keys`/`values` bindings: the renamed file assigned both findAll
    results to `A`, making the `keys +=` / `values +=` lines NameErrors.)
    """
    soup = BeautifulSoup(requests.get(UpperCamelCase__ ).text , """html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
    values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}


# Readable alias used by the demo loop below.
world_covidaa_stats = _lowerCAmelCase

if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
| 641
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    """split_dict""" , [
        SplitDict(),
        SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
        SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
        SplitDict({"""train""": SplitInfo()} ),
    ] , )
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> str:
    """A SplitDict must survive a round-trip through its YAML-list representation.

    (Restored bindings: the renamed file collapsed the locals onto `__a`, made
    the length assert compare the parameter against itself, and left
    `split_dict`/`reloaded` undefined at the final assert.)
    """
    split_dict = __SCREAMING_SNAKE_CASE
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    """split_info""" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="""my_dataset""" )] )
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    """`asdict` must serialise the deprecated `dataset_name` field of each split.

    (Restored bindings: the renamed file assigned the asdict result to `__a`
    while the asserts read the undefined `split_dict_asdict`/`split_info`, and
    the parametrize list referenced an undefined name where `None` belongs.)
    """
    split_info = __SCREAMING_SNAKE_CASE
    split_dict_asdict = asdict(SplitDict({"""train""": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 201
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
# Backward-compatible binding: the renamed file funnelled all three constants
# through this one name (so `BASE_URL` was undefined when the second assignment
# ran); it ends up holding the token, as before.
SCREAMING_SNAKE_CASE_ = USER_TOKEN


def __lowercase ( __SCREAMING_SNAKE_CASE ) -> dict[Any, Any]:
    """Fetch the authenticated user's profile from the GitHub REST API.

    `__SCREAMING_SNAKE_CASE` is the personal-access token used as the bearer
    credential; the request goes to the `/user` endpoint, not to the token.
    """
    headers = {
        """Authorization""": f'''token {__SCREAMING_SNAKE_CASE}''',
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()


# Readable alias used by the demo loop below (previously a NameError).
fetch_github_info = __lowercase

if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"""{key}: {value}""")
    else:
        raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 201
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batched, differentiable, standard pinhole camera.

    Restored from the degraded version in which every field was named `__a`
    while the methods read `self.origin/x/y/z/width/...`, and method bodies
    referenced the undefined placeholder `_A`.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # All four frame tensors must share a batch dimension and be [batch, 3].
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Image resolution as a float tensor [width, height]."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Field of view as a float tensor [x_fov, y_fov]."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2) — (column, row) per pixel.
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """Rays for every pixel, shape (batch, inner * H * W, 2, 3)."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map pixel coords (batch, ..., 2) to (origin, direction) rays (batch, ..., 2, 3)."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Normalize pixel coords to [-1, 1] and scale by the half-FOV tangent.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required dataclass field; omitting it raised TypeError
        )
def UpperCamelCase(_lowerCAmelCase: int) -> DifferentiableProjectiveCamera:
    """
    Create a batch of 20 cameras panning around the origin.

    :param _lowerCAmelCase: square image size (used for both width and height)

    Fixes the degraded version, which passed the image size to `np.sin`,
    `np.cos` and `np.cross` instead of the loop angle / frame vectors.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        # Viewing direction: orbit in the xy-plane, tilted down by -0.5 in z.
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=_lowerCAmelCase,
        height=_lowerCAmelCase,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 238
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer
    into a single processor.

    Restored from a degraded version in which every assignment bound the placeholder
    `_UpperCAmelCase` while later code read the real names (`audio`, `inputs`,
    `targets`, `labels`, ...), raising NameError on any call.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and/or text inputs (`audio`/`text`) and audio and/or text
        targets (`audio_target`/`text_target`), forwarding audio to the feature
        extractor and text to the tokenizer. Targets are attached to the returned
        inputs as `labels` (plus `decoder_attention_mask` when present).
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates processed inputs (`input_values` or `input_ids`) and `labels`
        into padded batches, delegating to the feature extractor / tokenizer.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily pad spectrogram targets with num_mel_bins as the
                # feature size, then restore the extractor's original setting.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
| 238
| 1
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
A_ : Optional[Any] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (model, ddp_model, dataloader) for basic regression training.

    Fixes the degraded signature in which all three parameters shared one name
    (a SyntaxError) while the body read `accelerator`/`num_samples`/`batch_size`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the tokenized MRPC validation dataloader used by `test_mrpc`."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # Pad to the longest example when requested, otherwise to a fixed length.
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": [...], "no": [...]}, accelerator) for the MRPC comparison."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over `dataloader`, gathering (logits, targets) across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that `gather_for_metrics` yields exactly `num_samples` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare baseline vs distributed glue/mrpc metrics; they must match."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Drive all gather_for_metrics checks across batching configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=None)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point for TPU spawning (xla_spawn); `index` is the process index."""
    main()


if __name__ == "__main__":
    main()
| 419
|
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Return the alternative (interleaved) arrangement of two strings.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'

    Fixes the degraded version whose two parameters shared one name (a
    SyntaxError) while the body read `first_str`/`second_str`; also restores
    the name called by the `__main__` guard.
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    # Iterate over the longer of the two lengths so no characters are dropped.
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    # Demo: interleave "AB" with "XYZ" and print the result followed by a space.
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 419
| 1
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase : str = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.

    Args:
        library_or_version: library name (looked up with importlib.metadata) or a parsed `Version`.
        operation: a key of `STR_OPERATION_TO_FUNC` (e.g. ">=", "==").
        requirement_version: version string to compare against.

    Fixes the degraded version whose three parameters shared one name and whose
    body referenced undefined placeholders.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str):
    """Compare the current torch version (`torch_version`) to `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
| 568
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

# Type aliases for pipeline outputs; `Prediction` must be bound before it is
# referenced by `Predictions` (the degraded version bound all three to one name).
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection` (or a
    LayoutLM-style token-classification model when a tokenizer is present).
    Predicts bounding boxes of objects and their classes.

    Restored from a degraded version in which every assignment bound the
    placeholder `_UpperCAmelCase` while later code read the real names.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        # Only `threshold` is forwarded, and only to postprocess.
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """Detect objects in the image(s) passed as inputs."""
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        # `target_size` is not a model input; carry it through to postprocess.
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # LayoutLM boxes are normalized to a 1000x1000 grid.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an [xmin, xmax, ymin, ymax] tensor into a dict of int pixel coords."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 657
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for AltDiffusionPipeline (restored from degraded names)."""

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny pipeline components for fast deterministic tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Default call kwargs for the pipeline on the given device."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the published BAAI/AltDiffusion weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 712
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space and returns their Manhattan (L1) distance.

    Raises:
        ValueError: if the points have different dimensionality or are invalid.
        TypeError: if a point is not a list of numbers.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> None:
if point:
if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
for item in point:
if not isinstance(SCREAMING_SNAKE_CASE__, (int, float) ):
a_ : int = (
"Expected a list of numbers as input, found "
F"""{type(SCREAMING_SNAKE_CASE__ ).__name__}"""
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
a_ : Dict = F"""Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE__ ).__name__}"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError("Missing an input" )
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    One-liner variant of `manhattan_distance` with identical validation and result.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 370
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    """
    Mixin of save/load round-trip tests for feature extractors. Concrete test
    classes must define `feature_extraction_class` and `feat_extract_dict`.

    Fixes the degraded version, which compared each JSON value against the
    undefined name `A_` instead of the dict value.
    """

    # Optional hook for dtype-casting tests; concrete subclasses may override.
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 281
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist
    (the largest side must be strictly shorter than the sum of the rest).

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 281
| 1
|
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
# Shared deprecation notice emitted by every metric helper in this module.
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    """Mean of exact matches between `preds` and `labels` (array-likes with `.mean()`)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_fa(preds, labels):
    """Accuracy, F1 and their average for binary classification predictions."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_fa, "sklearn")
    acc = simple_accuracy(preds, labels)
    fa = fa_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def _a ( preds , labels ):
    """Deprecated: Pearson and Spearman correlations plus their average.

    Bugs fixed: duplicate parameter names (SyntaxError) and undefined
    ``lowerCamelCase_`` references replaced with the real parameters.
    """
    warnings.warn(lowerCAmelCase__ , FutureWarning )
    requires_backends(_a , '''sklearn''' )
    # scipy returns (statistic, p-value); keep only the statistic.
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def _a ( task_name , preds , labels ):
    """Deprecated: compute the metric(s) for a named GLUE task.

    :param task_name: GLUE task key, e.g. ``"cola"`` or ``"sst-2"``
    :param preds: model predictions (array-like)
    :param labels: gold labels (array-like, same length)
    :raises KeyError: for an unknown task name

    Bugs fixed: three parameters shared one name (SyntaxError) and every call
    argument was the undefined ``lowerCamelCase_``.  Calls to the sibling
    helpers (``simple_accuracy``/``acc_and_f1``/``pearson_and_spearman``) are
    unresolvable in this file, so they are inlined as local helpers.
    """
    warnings.warn(lowerCAmelCase__ , FutureWarning )
    requires_backends(_a , '''sklearn''' )
    assert len(preds ) == len(labels ), F"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"""

    def _accuracy(p , l ):
        # fraction of exact matches
        return (p == l).mean()

    def _acc_and_fa(p , l ):
        acc = _accuracy(p , l )
        fa = fa_score(y_true=l , y_pred=p )
        return {"acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2}

    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": _accuracy(preds , labels )}
    elif task_name == "mrpc":
        return _acc_and_fa(preds , labels )
    elif task_name == "sts-b":
        pearson_corr = pearsonr(preds , labels )[0]
        spearman_corr = spearmanr(preds , labels )[0]
        return {"pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2}
    elif task_name == "qqp":
        return _acc_and_fa(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": _accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": _accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": _accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": _accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": _accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": _accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def _a ( task_name , preds , labels ):
    """Deprecated: compute accuracy for the XNLI task.

    Bugs fixed: three parameters shared one name (SyntaxError) and the body
    referenced the undefined ``lowerCamelCase_``; the sibling
    ``simple_accuracy`` is unresolvable here and is inlined.
    """
    warnings.warn(lowerCAmelCase__ , FutureWarning )
    requires_backends(_a , '''sklearn''' )
    if len(preds ) != len(labels ):
        raise ValueError(F"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}""" )
    if task_name == "xnli":
        return {"acc": (preds == labels).mean()}
    else:
        raise KeyError(task_name )
| 713
|
'''simple docstring'''
def _a ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : int ):
"""simple docstring"""
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
snake_case__ : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
snake_case__ : Tuple = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 502
| 0
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase( emb_a , emb_b , eps=1E-12 ):
    """Row-wise cosine similarity between two embedding matrices.

    Each matrix is L2-normalised per row (clipped at ``eps`` to avoid division
    by zero) and the pairwise similarities are returned as
    ``(rows_a, rows_b)``.

    Bugs fixed: the original declared three parameters with the same name
    (a SyntaxError), normalised ``emb_a`` twice instead of the second operand,
    and multiplied the wrong matrices.
    """
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class FlaxStableDiffusionSafetyCheckerModule(nn.Module ):
    """Flax module for the Stable Diffusion NSFW safety checker.

    Projects CLIP vision features into a concept-embedding space and flags
    images whose similarity to any unsafe concept exceeds its threshold.

    Bugs fixed: in the original, the two dataclass attributes and every
    ``setup`` assignment were collapsed to the same placeholder name (so no
    attribute read in ``__call__`` was ever bound), the setup method was not
    named ``setup`` (Flax would never call it), ``use_bias`` received an
    undefined name, and ``jnp.floataa`` does not exist (``jnp.float32``).
    The class is renamed to the identifier the wrapper class below already
    references.
    """

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self ):
        # CLIP vision tower plus a bias-free projection into the concept space.
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        # Concept / special-care embeddings and their per-concept thresholds;
        # initialised to ones, replaced by checkpoint weights at load time.
        self.concept_embeds = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )

    def __call__(self , clip_input ):
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        # ``lowerCAmelCase`` is the cosine-similarity helper defined above.
        special_cos_dist = lowerCAmelCase(image_embeds , self.special_care_embeds )
        cos_dist = lowerCAmelCase(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class __magic_name__(FlaxPreTrainedModel ):
    """Flax pretrained-model wrapper around the safety-checker module.

    Bugs fixed: the original base class was the undefined name ``snake_case``
    (the imported ``FlaxPreTrainedModel`` is clearly intended), all three
    class attributes shared one placeholder name so ``config_class`` /
    ``main_input_name`` / ``module_class`` were never set, ``__init__``
    declared several parameters with the same name (a SyntaxError), the
    weight-init method was not named ``init_weights``, and ``jnp.floataa``
    does not exist (``jnp.float32``).
    """

    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self , config , input_shape=None , seed=0 , dtype=jnp.float32 , _do_init=True , **kwargs ):
        if input_shape is None:
            # NHWC for a single 224x224 RGB image — CLIP vision's expected input.
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )

    def init_weights(self , rng , input_shape , params=None ):
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["params"]
        return random_params

    def __call__(self , clip_input , params=None ):
        # Incoming images are NCHW; the Flax module expects NHWC.
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {"params": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} )
| 628
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
SCREAMING_SNAKE_CASE :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase( a=2 , b=3 , batch_size=1_6 , n_train_batches = 1_0 , n_valid_batches = 2 ):
    """Build train/valid DataLoaders over synthetic ``y = a*x + b + noise`` data.

    :param a: slope of the generated linear relation
    :param b: intercept of the generated linear relation
    :param batch_size: batch size of both loaders
    :param n_train_batches: number of batches in the training set
    :param n_valid_batches: number of batches in the validation set
    :return: ``(train_dataloader, valid_dataloader)``

    Bug fixed: the original declared five parameters with the same name
    (a SyntaxError); the body read ``a``, ``b``, ``batch_size`` and
    ``n_batches``, from which the signature is restored.
    """
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    # NOTE(review): the obfuscation erased the shuffle flags; shuffling the
    # training set and not the validation set is the conventional choice.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def lowerCAmelCase( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """Run a short MSE training loop and return the random values drawn per step.

    The random draws let the tests verify that RNG state is checkpointed and
    restored correctly.

    :param num_epochs: number of passes over ``dataloader``
    :param model: model mapping inputs to predictions
    :param dataloader: yields ``(inputs, targets)`` batches
    :param optimizer: optimizer stepped once per batch
    :param accelerator: object providing ``backward(loss)``
    :param scheduler: optional LR scheduler stepped once per epoch
    :return: list of ``random.random()`` values, one per batch

    Bug fixed: the original declared six parameters with the same name
    (a SyntaxError); the body read the names restored here.
    """
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            inputs , targets = batch
            outputs = model(inputs )
            loss = torch.nn.functional.mse_loss(outputs , targets )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class __magic_name__ ( nn.Module ):
def __init__( self )-> List[Any]:
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.randn(1 ) )
UpperCamelCase_ = nn.Parameter(torch.randn(1 ) )
def UpperCAmelCase_ ( self , _lowercase )-> str:
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase ):
    """Checkpoint save/load round-trip tests for ``accelerate.Accelerator``.

    NOTE(review): this file appears machine-obfuscated.  Every test method
    below is named ``UpperCAmelCase_`` (so earlier definitions are shadowed
    and only the last survives on the class), and most assignment targets and
    call arguments were replaced with the placeholders ``UpperCamelCase_`` /
    ``_lowercase`` that are never bound — the original variable names
    (``model``, ``optimizer``, ``accelerator``, ``tmpdir``, …) survive only
    where they are *read*.  The code is kept byte-identical; only the intent
    of each method is documented.
    """

    def UpperCAmelCase_ ( self )-> List[Any]:
        # Intent: with ProjectConfiguration(total_limit=1), a second
        # save_state() must evict the first checkpoint so exactly one remains.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            UpperCamelCase_ = ProjectConfiguration(total_limit=1 , project_dir=_lowercase , automatic_checkpoint_naming=_lowercase )
            # Train baseline
            UpperCamelCase_ = Accelerator(project_config=_lowercase )
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def UpperCAmelCase_ ( self )-> Optional[Any]:
        # Intent: save_state(path)/load_state(path) with explicit directories
        # must restore model weights, optimizer state and RNG streams exactly.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            # Train baseline
            UpperCamelCase_ = Accelerator()
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase )
            # Save initial
            UpperCamelCase_ = os.path.join(_lowercase , "initial" )
            accelerator.save_state(_lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            UpperCamelCase_ = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            UpperCamelCase_ = Accelerator()
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase )
            accelerator.load_state(_lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            UpperCamelCase_ = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
            # Save everything
            UpperCamelCase_ = os.path.join(_lowercase , "checkpoint" )
            accelerator.save_state(_lowercase )
            # Load everything back in and make sure all states work
            accelerator.load_state(_lowercase )
            test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )

    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        # Intent: same round-trip as above but with automatic checkpoint
        # naming ("checkpoints/checkpoint_<i>") managed by ProjectConfiguration.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
            # Train baseline
            UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase )
            # Save initial
            accelerator.save_state()
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            UpperCamelCase_ = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            UpperCamelCase_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowercase )
            UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase )
            accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            UpperCamelCase_ = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_1" ) )
            test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
            ((UpperCamelCase_) , (UpperCamelCase_)) = model.a.item(), model.b.item()
            UpperCamelCase_ = optimizer.state_dict()
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )
            self.assertEqual(_lowercase , _lowercase )

    def UpperCAmelCase_ ( self )-> Any:
        # Intent: register_for_checkpointing must reject objects without a
        # state_dict (the two tensors, indices 0 and 1) while accepting the
        # model and optimizer (indices 2 and 3).
        UpperCamelCase_ = torch.tensor([1, 2, 3] )
        UpperCamelCase_ = torch.tensor([2, 3, 4] )
        UpperCamelCase_ = DummyModel()
        UpperCamelCase_ = torch.optim.Adam(net.parameters() )
        UpperCamelCase_ = Accelerator()
        with self.assertRaises(_lowercase ) as ve:
            accelerator.register_for_checkpointing(_lowercase , _lowercase , _lowercase , _lowercase )
        UpperCamelCase_ = str(ve.exception )
        self.assertTrue("Item at index 0" in message )
        self.assertTrue("Item at index 1" in message )
        self.assertFalse("Item at index 2" in message )
        self.assertFalse("Item at index 3" in message )

    def UpperCAmelCase_ ( self )-> Optional[Any]:
        # Intent: an LR scheduler prepared by the Accelerator must have its
        # state saved and restored by save_state()/load_state() as well.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            UpperCamelCase_ = torch.optim.lr_scheduler.StepLR(_lowercase , step_size=1 , gamma=0.99 )
            UpperCamelCase_ , UpperCamelCase_ = dummy_dataloaders()
            UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
            # Train baseline
            UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare(
                _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
            # Save initial
            accelerator.save_state()
            UpperCamelCase_ = scheduler.state_dict()
            train(3 , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
            self.assertNotEqual(_lowercase , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) )
            self.assertEqual(_lowercase , scheduler.state_dict() )

    def UpperCAmelCase_ ( self )-> str:
        # Intent: with total_limit=2 and automatic naming, saving 11 states
        # must keep only the two most recent checkpoints (9 and 10).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            UpperCamelCase_ = DummyModel()
            UpperCamelCase_ = ProjectConfiguration(automatic_checkpoint_naming=_lowercase , total_limit=2 )
            # Train baseline
            UpperCamelCase_ = Accelerator(project_dir=_lowercase , project_config=_lowercase )
            UpperCamelCase_ = accelerator.prepare(_lowercase )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_0" ) ) )
            self.assertTrue(os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_9" ) ) )
            self.assertTrue(os.path.exists(os.path.join(_lowercase , "checkpoints" , "checkpoint_10" ) ) )

    @require_cuda
    def UpperCAmelCase_ ( self )-> int:
        # Intent: re-run this file under torchrun (one process per GPU) to
        # exercise the distributed `__main__` section below.
        UpperCamelCase_ = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test, intended to be launched via torchrun by the CUDA
    # test above: saves a checkpoint, then reloads the optimizer state onto the
    # CPU, onto the accelerator device, and finally with an invalid
    # map_location to check the error path.
    #
    # NOTE(review): the obfuscation replaced the assignment targets with the
    # placeholder ``SCREAMING_SNAKE_CASE`` while the *reads* keep the original
    # names (``savedir``, ``model``, ``optimizer``, ``scheduler``,
    # ``train_dataloader``, ``valid_dataloader``, ``accelerator``,
    # ``param_device``); ``train`` and ``dummy_dataloaders`` are the original
    # names of the helper functions defined earlier in this file.
    SCREAMING_SNAKE_CASE :Any = """/tmp/accelerate/state_checkpointing"""
    SCREAMING_SNAKE_CASE :Any = DummyModel()
    SCREAMING_SNAKE_CASE :Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    SCREAMING_SNAKE_CASE :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :str = dummy_dataloaders()
    SCREAMING_SNAKE_CASE :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    SCREAMING_SNAKE_CASE :int = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
    if accelerator.process_index == 0:
        # Only the main process (re)creates the shared checkpoint directory.
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Tuple = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[str] = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE :Optional[Any] = group["""params"""][0].device
        break
    assert param_device.type == accelerator.device.type
    SCREAMING_SNAKE_CASE :List[str] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE :Optional[int] = group["""params"""][0].device
        break
    assert (
        param_device.type == torch.device("""cpu""").type
    ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
    for group in optimizer.param_groups:
        SCREAMING_SNAKE_CASE :str = group["""params"""][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
        accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 628
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# NOTE(review): the obfuscation renamed three distinct module constants to the
# same identifier `_snake_case`, so the logger and the model-class list are
# immediately shadowed — only the final tuple of model-type strings survives.
# The last line also reads `MODEL_CONFIG_CLASSES`, which is never bound under
# that name here (it was presumably the list assigned two lines above).
_snake_case : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
_snake_case : List[Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_snake_case : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we will input to the model for training and evaluation.

    Bugs fixed: every field in the original was declared under the same
    placeholder name ``SCREAMING_SNAKE_CASE__`` (so only one field survived),
    several defaults were the undefined sentinel ``__SCREAMING_SNAKE_CASE``
    (``None`` intended), and the post-init hook was not named
    ``__post_init__`` so dataclasses never invoked it.  Field names are
    restored from the attribute reads in the ``main`` function below
    (``dataset_name``, ``data_files``, ``train_val_split``, ...).  The class
    is renamed to ``DataTrainingArguments``, which ``main`` already passes to
    ``HfArgumentParser``.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect explicit train/validation folders into the `data_files`
        # mapping consumed by `load_dataset`; None when neither is given.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we pre-train.

    Bugs fixed: all fields in the original shared the placeholder name
    ``SCREAMING_SNAKE_CASE__`` and several defaults were the undefined
    sentinel ``__SCREAMING_SNAKE_CASE``.  Field names are restored from the
    ``model_args.<name>`` reads in ``main`` below; the class is renamed to
    ``ModelArguments``, which ``main`` already passes to ``HfArgumentParser``.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    # NOTE(review): `_snake_case` is the module-level tuple of supported model
    # type strings defined near the imports (originally named MODEL_TYPES).
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_snake_case)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    # `main` only tests truthiness of this flag, so False is a safe default.
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """Random patch-mask generator for SimMIM-style masked image modeling.

    Each call masks a random ``mask_ratio`` fraction of the
    ``mask_patch_size``-sized patches and expands the patch mask to the
    model's patch grid.

    Bugs fixed: ``__init__`` assigned every value to a throwaway local instead
    of ``self.<attr>`` (so ``__call__`` would raise AttributeError),
    ``np.zeros(..., dtype=_a)`` referenced an undefined name (``int``
    intended), and the class is renamed to ``MaskGenerator``, which ``main``
    below instantiates.
    """

    def __init__(self, input_size=1_92, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )
        # Side length of the coarse (maskable) patch grid.
        self.rand_size = self.input_size // self.mask_patch_size
        # How many model patches each maskable patch spans per side.
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__(self ):
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        # Expand each coarse patch to the fine model-patch grid.
        mask = mask.repeat(self.scale, axis=0 ).repeat(self.scale, axis=1 )
        return torch.tensor(mask.flatten() )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.stack([example["pixel_values"] for example in examples] )
__SCREAMING_SNAKE_CASE = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _A ( ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE = ds["train"].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE = split["train"]
__SCREAMING_SNAKE_CASE = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name_or_path , **__snake_case )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__snake_case , "decoder_type" ):
__SCREAMING_SNAKE_CASE = "simmim"
# adapt config
__SCREAMING_SNAKE_CASE = model_args.image_size if model_args.image_size is not None else config.image_size
__SCREAMING_SNAKE_CASE = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__SCREAMING_SNAKE_CASE = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
__SCREAMING_SNAKE_CASE = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__SCREAMING_SNAKE_CASE = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
__SCREAMING_SNAKE_CASE = AutoModelForMaskedImageModeling.from_config(__snake_case )
if training_args.do_train:
__SCREAMING_SNAKE_CASE = ds["train"].column_names
else:
__SCREAMING_SNAKE_CASE = ds["validation"].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE = "image"
elif "img" in column_names:
__SCREAMING_SNAKE_CASE = "img"
else:
__SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda __snake_case : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.6_7, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__SCREAMING_SNAKE_CASE = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(__snake_case :Any ):
__SCREAMING_SNAKE_CASE = [transforms(__snake_case ) for image in examples[image_column_name]]
__SCREAMING_SNAKE_CASE = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Initialize our trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics("eval" , __snake_case )
trainer.save_metrics("eval" , __snake_case )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
# Standard script entry point: run training/evaluation only when executed
# directly, not when this module is imported.
if __name__ == "__main__":
    main()
| 701
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case : Dict = random.Random()
def _A ( __snake_case :str , __snake_case :str=1.0 , __snake_case :Any=None , __snake_case :Optional[int]=None ) -> Tuple:
"""simple docstring"""
if rng is None:
__SCREAMING_SNAKE_CASE = global_rng
__SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """Holds hyper-parameters and builds dummy audio batches for TvltFeatureExtractor tests.

    NOTE(review): restored from a mangled source in which every ``__init__``
    parameter was named ``_a`` (a SyntaxError via duplicate arguments) and
    both methods shared one name, so the second silently shadowed the dict
    builder.  Parameter names are recovered from the attribute assignments in
    the body; the class name matches the ``TvltFeatureExtractionTester(self)``
    call in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive sample lengths so a batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of dummy waveforms.

        Args:
            equal_length: if True every sample has ``max_seq_length`` frames;
                otherwise lengths increase from ``min_seq_length`` in steps of
                ``seq_length_diff``.
            numpify: if True, convert each sample to a numpy array.
        """

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            # NOTE(review): original read `np.asarray(_a)` — `_a` was the
            # numpify flag, not the sample; the loop variable is intended.
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractorTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Unit tests for ``TvltFeatureExtractor``.

    NOTE(review): restored from a mangled source in which all seven methods
    shared the name ``__lowerCAmelCase`` (each definition shadowed the
    previous, so unittest discovered no ``test_*`` methods at all) and the
    base class name collided with the tester class above.  The intended
    mixin, ``SequenceFeatureExtractionTestMixin``, is imported at the top of
    the file; it reads ``feature_extraction_class`` and supplies
    ``self.feat_extract_dict``.
    """

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        """The extractor exposes every configuration attribute it was built with."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained round-trips the full configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel_filters is a float array: compare with allclose, the rest exactly.
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """to_json_file / from_json_file round-trips the full configuration."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        # NOTE(review): original passed `mask_audio=_a`, an undefined name in
        # this scope — the masking branch is exercised with mask_audio=True.
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        """Load `num_samples` decoded waveforms from the dummy LibriSpeech split."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        """End-to-end check against a reference output slice."""
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        # assertEquals is a deprecated alias — use assertEqual.
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 214
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.