code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class A_ :
'''simple docstring'''
def __init__( self: str , a: Union[str, Any] , a: Tuple=13 , a: Dict=7 , a: Any=True , a: Optional[int]=True , a: Optional[int]=True , a: int=True , a: Dict=99 , a: Union[str, Any]=32 , a: Union[str, Any]=2 , a: Optional[int]=4 , a: int=37 , a: List[Any]="gelu" , a: str=0.1 , a: Optional[int]=0.1 , a: Optional[Any]=512 , a: List[str]=16 , a: int=2 , a: Dict=0.0_2 , a: Optional[Any]=3 , a: int=4 , a: Optional[Any]=None , ):
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : List[str] = 13
__lowerCamelCase : int = 7
__lowerCamelCase : Any = True
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : Optional[int] = True
__lowerCamelCase : List[str] = True
__lowerCamelCase : Tuple = 99
__lowerCamelCase : Union[str, Any] = 32
__lowerCamelCase : str = 2
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : int = 37
__lowerCamelCase : Union[str, Any] = 'gelu'
__lowerCamelCase : List[Any] = 0.1
__lowerCamelCase : Tuple = 0.1
__lowerCamelCase : Union[str, Any] = 512
__lowerCamelCase : str = 16
__lowerCamelCase : Tuple = 2
__lowerCamelCase : Optional[Any] = 0.0_2
__lowerCamelCase : int = 3
__lowerCamelCase : Optional[int] = 4
__lowerCamelCase : Optional[Any] = None
def _snake_case ( self: int ):
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : List[str] = None
if self.use_input_mask:
__lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : int = None
if self.use_token_type_ids:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : List[Any] = None
__lowerCamelCase : int = None
if self.use_labels:
__lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase : Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self: int , a: str , a: List[str] , a: List[str] , a: int , a: List[str] , a: Any , a: Optional[Any] ):
__lowerCamelCase : int = TFRoFormerModel(config=__lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCamelCase : str = [input_ids, input_mask]
__lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
__lowerCamelCase : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self: List[str] , a: List[str] , a: Union[str, Any] , a: int , a: List[str] , a: Optional[int] , a: Any , a: Dict ):
__lowerCamelCase : Tuple = True
__lowerCamelCase : int = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__lowerCamelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCamelCase : Tuple = model(__lowerCAmelCase )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _snake_case ( self: List[str] , a: Dict , a: int , a: List[str] , a: Optional[Any] , a: Tuple , a: Union[str, Any] , a: Dict ):
__lowerCamelCase : Dict = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self: Union[str, Any] , a: Dict , a: List[str] , a: List[Any] , a: int , a: str , a: Tuple , a: List[Any] ):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : List[str] = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__lowerCamelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Dict , a: List[str] , a: Optional[Any] , a: Any , a: List[Any] , a: List[str] , a: Union[str, Any] , a: int ):
__lowerCamelCase : Optional[Any] = self.num_choices
__lowerCamelCase : Optional[Any] = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__lowerCamelCase : Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase : Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCamelCase : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self: str , a: Dict , a: Union[str, Any] , a: Union[str, Any] , a: Union[str, Any] , a: Union[str, Any] , a: Any , a: Union[str, Any] ):
__lowerCamelCase : List[str] = self.num_labels
__lowerCamelCase : Dict = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__lowerCamelCase : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self: List[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[Any] , a: Tuple , a: Tuple , a: Tuple , a: Tuple ):
__lowerCamelCase : Dict = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__lowerCamelCase : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
(
__lowerCamelCase
) : Any = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
def _snake_case ( self: List[str] , a: List[Any] , a: Optional[Any] , a: Dict , a: Optional[int] , a: Any ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _snake_case ( self: List[Any] ):
__lowerCamelCase : int = TFRoFormerModelTester(self )
__lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def _snake_case ( self: Tuple ):
self.config_tester.run_common_tests()
def _snake_case ( self: int ):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _snake_case ( self: int ):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def _snake_case ( self: Any ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _snake_case ( self: List[str] ):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _snake_case ( self: Any ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _snake_case ( self: Tuple ):
__lowerCamelCase : Tuple = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self: List[str] ):
__lowerCamelCase : Dict = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__lowerCamelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCamelCase : str = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__lowerCamelCase : Dict = 5_0000
__lowerCamelCase : int = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__lowerCamelCase : Optional[Any] = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 1e-4
def _snake_case ( self: int ):
__lowerCamelCase : int = tf.constant([[4, 10]] )
__lowerCamelCase : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__lowerCamelCase : Union[str, Any] = emba(input_ids.shape )
__lowerCamelCase : Union[str, Any] = tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
__lowerCamelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__lowerCamelCase : Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
__snake_case = 1e-4
def _snake_case ( self: Union[str, Any] ):
# 2,12,16,64
__lowerCamelCase : Optional[Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__lowerCamelCase : Optional[int] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__lowerCamelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__lowerCamelCase : Any = embed_positions([2, 16, 768] )[None, None, :, :]
__lowerCamelCase : List[Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase : Optional[int] = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
__lowerCamelCase : str = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 669
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowerCAmelCase__: Tuple = logging.get_logger(__name__)
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : int = CLIPConfig
__lowerCamelCase : Optional[int] = ['CLIPEncoderLayer']
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPVisionModelWithProjection(config.vision_config )
SCREAMING_SNAKE_CASE_ : List[str] = nn.Linear(config.vision_config.projection_dim , 1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.5 , __lowerCAmelCase=0.5 ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.vision_model(__lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : int = self.p_head(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = nsfw_detected.flatten()
SCREAMING_SNAKE_CASE_ : Any = nsfw_detected > p_threshold
SCREAMING_SNAKE_CASE_ : Dict = nsfw_detected.tolist()
if any(__lowerCAmelCase ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(__lowerCAmelCase ):
if nsfw_detected_:
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.zeros(images[idx].shape )
SCREAMING_SNAKE_CASE_ : Any = self.w_head(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = watermark_detected.flatten()
SCREAMING_SNAKE_CASE_ : List[str] = watermark_detected > w_threshold
SCREAMING_SNAKE_CASE_ : List[str] = watermark_detected.tolist()
if any(__lowerCAmelCase ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(__lowerCAmelCase ):
if watermark_detected_:
SCREAMING_SNAKE_CASE_ : List[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 345
| 0
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __A ( lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __lowerCAmelCase :
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
pass
def snake_case_ (self ):
pass
def snake_case_ (self ):
pass
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , F"Difference between torch and flax is {diff} (>= {tol})." )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCAmelCase : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
_UpperCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCAmelCase : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Any = {"""vision_model""": vision_model, """text_model""": text_model}
_UpperCAmelCase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = after_output[0]
_UpperCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
_UpperCAmelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
_UpperCAmelCase : str = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : int = to_atuple(vision_model.config.image_size )
_UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase : Dict = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
_UpperCAmelCase : List[Any] = inputs_dict
_UpperCAmelCase : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_UpperCAmelCase : int = pt_model(**lowerCAmelCase__ ).to_tuple()
_UpperCAmelCase : Optional[Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCAmelCase : Any = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
_UpperCAmelCase : str = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def snake_case_ (self ):
_UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : str = config_inputs_dict.pop("""vision_config""" )
_UpperCAmelCase : List[Any] = config_inputs_dict.pop("""text_config""" )
_UpperCAmelCase : Any = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : int = self.get_pretrained_model_and_inputs()
_UpperCAmelCase : List[Any] = model_a(**lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = model_a(**lowerCAmelCase__ )
_UpperCAmelCase : Tuple = after_outputs[0]
_UpperCAmelCase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-5 )
@require_flax
class __lowerCAmelCase ( __a , unittest.TestCase ):
def snake_case_ (self ):
_UpperCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCAmelCase : Tuple = 1_3
_UpperCAmelCase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] )
_UpperCAmelCase : List[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = FlaxViTModel(lowerCAmelCase__ )
_UpperCAmelCase : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = FlaxViTModelTester(self )
_UpperCAmelCase : Tuple = FlaxBertModelTester(self )
_UpperCAmelCase : Any = vit_model_tester.prepare_config_and_inputs()
_UpperCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase : int = vision_config_and_inputs
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowerCAmelCase ( __a , unittest.TestCase ):
def snake_case_ (self ):
_UpperCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
_UpperCAmelCase : Optional[int] = 1_3
_UpperCAmelCase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_UpperCAmelCase : str = random_attention_mask([batch_size, 4] )
_UpperCAmelCase : List[Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : str = FlaxCLIPVisionModel(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def snake_case_ (self ):
_UpperCAmelCase : str = FlaxCLIPVisionModelTester(self )
_UpperCAmelCase : List[Any] = FlaxBertModelTester(self )
_UpperCAmelCase : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Any = bert_model_tester.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase : List[Any] = vision_config_and_inputs
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def snake_case_ (self ):
_UpperCAmelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
_UpperCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_UpperCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase : Union[str, Any] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""np""" )
_UpperCAmelCase : str = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
| 156
|
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum through a matrix (Project Euler 82).

    The path starts in any cell of the left column, ends in any cell of the
    right column, and may move right, up and down. The matrix is read from a
    comma-separated text file located next to ``filename``.

    Bug fixes: the matrix parser previously called ``int`` on the *filename*
    instead of each element, and the row/column counts and loop bounds were
    bound to throwaway names, leaving every later read undefined. The public
    name ``solution`` was restored (the ``__main__`` guard calls it).
    """
    with open(os.path.join(os.path.dirname(filename), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # A path can start in any row of the left column at no extra cost.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    # Relax column by column: enter from the left, then allow downward and
    # upward movement within the column (one pass each direction suffices for
    # non-negative entries).
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # Solve Project Euler 82 for the bundled "input.txt" matrix.
    print(F"{solution() = }")
| 156
| 1
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for LayoutLM (slow and fast tokenizers).

    Bug fixes: the base class was an undefined placeholder (restored to the
    imported ``TokenizerTesterMixin``); the mixin class attributes and method
    names were all mangled to one shadowing name; and ``self.vocab_file`` /
    ``vocab_tokens`` / ``tokens`` were never bound, so later reads failed.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary written to the test's temp dir.
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # NOTE(review): originally an empty override (method name assumed from
        # upstream — verify); kept as a deliberate no-op placeholder.
        pass
| 66
|
import sys
# The 1000-digit series from Project Euler problem 8. Restored the canonical
# name ``N``: ``solution``'s default argument reads ``N``, which was undefined
# while the constant was bound to a throwaway name.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digit characters of ``s`` (1 for empty).

    Bug fixes: restored the name ``str_eval`` (which ``solution`` calls) and
    the parameter name ``s`` — the loop previously iterated over an undefined
    name while the parameter was never used.
    """
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n=None) -> int:
    """Return the greatest product of thirteen adjacent digits of ``n``.

    Args:
        n: string of decimal digits; defaults to the module-level 1000-digit
           series ``N`` (Project Euler problem 8).

    Bug fixes: restored the name ``solution`` (the ``__main__`` guard calls
    it); the default previously referenced ``N`` eagerly, which failed at
    definition time; and the sliding-window shortcut silently skipped
    candidate windows on monotone digit runs, so every 13-digit window is
    now scored directly.
    """
    if n is None:
        # Resolved lazily so importing this module never evaluates ``N``
        # before it exists.
        n = N
    largest_product = -sys.maxsize - 1
    for cur_index in range(len(n) - 12):
        largest_product = max(largest_product, str_eval(n[cur_index : cur_index + 13]))
    return largest_product
if __name__ == "__main__":
    # Print the largest 13-digit-window product for the bundled series.
    print(f'''{solution() = }''')
| 66
| 1
|
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# Bug fix: restored the canonical ``_import_structure`` name — the module
# previously bound these containers to throwaway names while ``_LazyModule``
# below read ``_import_structure``, which was undefined.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively pretty-print a (possibly nested) checkpoint dict.

    Tensor leaves are printed with their shape, other leaves verbatim; nested
    dicts indent by two dots per level.

    Bug fixes: restored distinct parameter names (the mangled signature
    repeated one name, a SyntaxError), the locals ``fmt``/``msg`` that the
    body reads, and the recursive call under the name ``main`` expects.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias from Megatron layout to transformers'.

    Megatron checkpoint version 1.0 stores ``[num_heads * hidden_size *
    num_splits, :]`` while 2.0+ stores ``[num_heads * num_splits *
    hidden_size, :]``; both are rearranged so the tensor loads into GPT-2
    attention parameters. The original shape is restored before returning.

    Bug fixes: restored distinct parameter names (the mangled signature
    repeated one name, a SyntaxError) and the locals ``input_shape`` /
    ``saved_shape`` / ``param`` the body reads; the name itself is what the
    conversion routine calls.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 ``state_dict`` into transformers' layout.

    Args:
        args: parsed CLI namespace (unused here; kept for call-site parity).
        input_state_dict: raw ``torch.load``-ed Megatron checkpoint dict.
        config: GPT-2 config; updated in place from the checkpoint's training
            args when they are present.

    Returns:
        dict mapping transformers parameter names to tensors.

    Bug fixes: restored distinct parameter names (the mangled signature
    repeated one name, a SyntaxError) and every local that the body reads;
    all assignments previously went to a throwaway name.
    """
    # The converted output.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """CLI entry point: load a Megatron checkpoint, convert it, save results.

    Bug fixes: restored the name ``main`` (the ``__main__`` guard calls it)
    and every local (``parser``, ``args``, ``config``, …) that the body
    reads; all assignments previously went to a throwaway name.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f"Saving checkpoint to \"{output_checkpoint_file}\"")
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################

# Script entry point: run the full Megatron -> transformers conversion.
if __name__ == "__main__":
    main()

####################################################################################################
| 253
| 1
|
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
# Map DATASETS_VERBOSITY environment values to stdlib logging levels.
# Bug fix: restored the names ``log_levels`` / ``_default_log_level`` — the
# functions below read them, but both values were bound to a throwaway name.
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    """Resolve the default log level, honouring ``DATASETS_VERBOSITY``.

    Falls back to ``_default_log_level`` when the variable is unset or names
    an unknown level.

    Bug fixes: restored the name other module code reads and the local
    ``env_level_str`` — the ``os.getenv`` result was previously bound to a
    throwaway name, so every later read failed.
    """
    env_level_str = os.getenv('DATASETS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ", ".join(log_levels.keys() ) }'''
            )
    return _default_log_level
def _get_library_name() -> str:
    """Return the library's top-level package name.

    Bug fix: restored this name — sibling helpers call ``_get_library_name``,
    which was undefined while the definition carried a throwaway name.
    """
    return __name__.split('.')[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the logger shared by the whole library.

    Bug fix: restored this name — several sibling functions call
    ``_get_library_root_logger``, which was undefined while the definition
    carried a throwaway name.
    """
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Apply the default verbosity to the library's root logger.

    Bug fixes: restored this name (called at module import time) and the
    local ``library_root_logger`` — the logger was previously bound to a
    throwaway name, so the ``setLevel`` call failed.
    """
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    """Reset the library root logger level to NOTSET (defer to ancestors).

    Bug fix: the logger was previously bound to a throwaway name, so the
    ``setLevel`` call read an undefined variable. (Function name assumed from
    upstream ``datasets.utils.logging`` — no in-file reads to confirm it.)
    """
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given name, defaulting to the library root.

    Bug fix: the ``None`` branch previously assigned the fallback name to a
    dead local, so the default was silently ignored and the *root* logger was
    returned instead; the parameter name ``name`` is what the body reads.
    """
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current effective verbosity of the library's root logger.

    Bug fix: restored the public name (upstream API) in place of the shared
    throwaway name that shadowed every other function in this module.
    """
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger.

    Bug fix: restored this name — the ``set_verbosity_*`` shortcuts below
    call ``set_verbosity``, which was undefined while the definition carried
    a throwaway name.
    """
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info() -> None:
    """Set the library's verbosity to INFO.

    NOTE(review): the level argument was lost in refactoring; the
    info/warning/debug/error order is assumed from upstream
    ``datasets.utils.logging`` — verify before relying on it.
    """
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    """Set the library's verbosity to WARNING (order assumed — see above
    NOTE in ``set_verbosity_info``)."""
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    """Set the library's verbosity to DEBUG (order assumed)."""
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    """Set the library's verbosity to ERROR (order assumed)."""
    return set_verbosity(ERROR)
def a_ ( ) -> None:
    """Disable-style toggle (original name and target lost in refactoring).

    NOTE(review): the assignment below binds a throwaway local, so this
    function is currently a no-op; the original assignment target (likely a
    module flag or a logger attribute) cannot be recovered from this file —
    restore it before relying on this function.
    """
    snake_case: Union[str, Any] =False
def a_ ( ) -> None:
    """Enable-style toggle (original name and target lost in refactoring).

    NOTE(review): no-op for the same reason as the function above — the
    assignment target was lost; confirm the intended flag before use.
    """
    snake_case: Union[str, Any] =True
# Configure the library root logger at the module level (singleton-like):
# runs once on import so the default verbosity is applied immediately.
_configure_library_root_logger()
class EmptyTqdm:
    """Drop-in replacement for ``tqdm.tqdm`` that does nothing.

    Bug fixes: restored the name ``EmptyTqdm`` (the tqdm factory below
    instantiates it); gave ``*args``/``**kwargs`` distinct names (repeating
    one name is a SyntaxError); and bound the wrapped iterable to
    ``self._iterator``, which ``__iter__`` reads.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep the wrapped iterable so iteration still works with bars off.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        """Return a no-op function for any tqdm attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
# Global progress-bar switch. Bug fix: restored the name ``_tqdm_active`` —
# the tqdm factory and the enable/disable helpers all read it, but the flag
# was bound to a throwaway name.
_tqdm_active = True
class _tqdm_cls:
    """Callable factory: real ``tqdm`` when bars are enabled, no-op otherwise.

    Bug fixes: restored the name ``_tqdm_cls`` (the instantiation below reads
    it); gave ``__call__``/``set_lock`` distinct parameter names (repeating
    one name is a SyntaxError); and bound the lock sentinel to
    ``self._lock`` instead of a dead local.
    """

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


# Module-level singleton used as a ``tqdm`` stand-in throughout the library.
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return True when tqdm progress bars are globally enabled.

    Bug fix: restored the public upstream name in place of the shared
    throwaway name that shadowed every other function in this module.
    """
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar() -> None:
    """Globally enable tqdm progress bars.

    Bug fix: the assignment previously bound a throwaway *local* despite the
    ``global`` declaration, making the function a no-op; it now writes the
    shared ``_tqdm_active`` flag.
    """
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    """Globally disable tqdm progress bars.

    Bug fix: same dead-local defect as ``enable_progress_bar``; the shared
    ``_tqdm_active`` flag is now actually cleared.
    """
    global _tqdm_active
    _tqdm_active = False
| 350
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a_ :
    """Empty test-suite placeholder.

    NOTE(review): presumably exists only so the ``require_onnxruntime``
    decorator's import-time behaviour is exercised — confirm before removing.
    """

    pass
| 350
| 1
|
import math
def proth(number: int) -> int:
    """Return the ``number``-th Proth number (1-indexed).

    Proth numbers have the form k * 2**n + 1 with odd k and k < 2**n:
    3, 5, 9, 13, 17, 25, 33, 41, 49, ...

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is < 1.

    Bug fixes: restored the name ``proth`` (the ``__main__`` guard calls it)
    and the parameter/local names the body reads; the type check previously
    tested ``isinstance(x, x)`` and the list/counters were bound to a
    throwaway name.
    """
    if not isinstance(number, int):
        error_message = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_message)

    if number < 1:
        error_message = f"Input value of [number={number}] must be > 0"
        raise ValueError(error_message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in doubling "blocks"; compute how many blocks
        # are needed to cover the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        # Bug fix: the result was previously bound to a throwaway name while
        # the print statement read the undefined ``value``.
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 175
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# Module-wide logger (HF logging wrapper, not the stdlib module).
logger = logging.get_logger(__name__)
# Bug fix: restored the canonical constant names — the tokenizer class below
# reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_INIT_CONFIGURATION / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# all of which were bound to one throwaway name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
        ),
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
        ),
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
        ),
        "bert-base-multilingual-cased": (
            "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-cased": (
            "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class _A(PreTrainedTokenizerFast):
    r"""Fast BERT tokenizer, backed by HuggingFace's *tokenizers* library.

    Bug fixes: restored the base class to the imported
    ``PreTrainedTokenizerFast`` (the previous base was an undefined
    placeholder); restored distinct ``__init__`` parameter names (repeating
    one name is a SyntaxError, and the body reads ``do_lower_case`` etc.);
    and restored the class-attribute and method names the base class
    contract relies on (the previous names shadowed one another).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer when the serialized options disagree
        # with the requested ones (e.g. a checkpoint saved with different
        # casing or accent handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence, or [CLS] A [SEP] B [SEP] for a pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create BERT token-type ids: 0s for the first segment, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 175
| 1
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( A_: List[Any] , A_: Any=7 ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =None
if token is not None:
__UpperCAmelCase ={"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
__UpperCAmelCase ="""636036"""
__UpperCAmelCase =F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
__UpperCAmelCase =requests.get(A_ , headers=A_ ).json()
return result["workflow_runs"]
def lowercase__(token):
    """Return the id of the most recent *completed* daily-CI workflow run.

    Returns None when no completed run is found.

    NOTE(review): local names restored (the original assigned to a mangled
    name and read `workflow_run_id`). The call to `get_daily_ci_runs` matches
    the real name of the sibling function above, whose `def` line was mangled
    — confirm both names agree in the repaired file.
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are newest-first, so take the first completed one.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def lowercase__(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily-CI run.

    NOTE(review): parameter names restored (original reused one mangled name
    three times — a SyntaxError). The keyword `worflow_run_id` (sic) matches
    the imported helper's own spelling; do not "fix" it here without changing
    get_artifacts_links too.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def lowercase__(artifact_names, output_dir, token):
    """Fetch the last daily-CI artifacts and return their text contents.

    Returns a dict mapping artifact name -> {member filename -> decoded text}
    for every artifact zip that was actually downloaded.

    NOTE(review): local names restored; the original never bound `results`
    or the zip path it read.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    # Skip directory entries; read each file member as UTF-8 text.
                    if not os.path.isdir(filename):
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 68
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( a ):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a
    single processor.

    NOTE(review): the three class attributes were all assigned to one mangled
    name in the original (each clobbering the previous); restored to the
    names the ProcessorMixin base class reads. Local variable names inside
    __call__ were likewise restored (`encoding` was read but never bound).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize *text* and preprocess *images*, merged into one BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    # NOTE(review): the three members below were all mangled to `snake_case_`
    # in the original (later definitions shadow earlier ones); names are kept
    # to preserve the visible interface, but they presumably were
    # batch_decode / decode / model_input_names — confirm before use.
    def snake_case_(self, *args, **kwargs) -> str:
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def snake_case_(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def snake_case_(self):
        """De-duplicated union of both sub-processors' model input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 161
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """GPU integration tests for StableDiffusionKDiffusionPipeline.

    NOTE(review): every method name was mangled to `_lowerCamelCase` in the
    original (later defs shadow earlier ones — presumably tearDown/test_*);
    names kept to preserve the visible interface. Unbound local names
    (`sd_pipe`, `prompt`, `generator`, ...) restored.
    """

    def _lowerCamelCase(self) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase(self) -> str:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def _lowerCamelCase(self) -> Any:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        # Loose tolerance — this checkpoint is known to drift across hardware.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def _lowerCamelCase(self) -> Optional[int]:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 702
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _A ( __a , __a , unittest.TestCase ):
# NOTE(review): the four class attributes below were all mangled to `__a`,
# so each assignment clobbers the previous one — presumably pipeline_class,
# params, batch_params and required_optional_params; confirm against
# PipelineTesterMixin before relying on them.
__a = IFPipeline
__a = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
__a = TEXT_TO_IMAGE_BATCH_PARAMS
__a = PipelineTesterMixin.required_optional_params - {'latents'}
# NOTE(review): every method below is named `_lowerCamelCase`; later
# definitions shadow earlier ones, so only the last is reachable — the
# names look mangled from individual test_* methods.
def _lowerCamelCase ( self ) -> Tuple:
# Fixture components come from IFPipelineTesterMixin.
return self._get_dummy_components()
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> Union[str, Any]:
# NOTE(review): the parameter list reuses one mangled name (SyntaxError);
# the body reads `generator`/`inputs`, which are never bound here.
# MPS needs a globally-seeded generator; other devices take a device-bound one.
if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowerCamelCase ( self ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCamelCase ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCamelCase ( self ) -> int:
self._test_save_load_local()
def _lowerCamelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
# NOTE(review): all method names were mangled to `_lowerCamelCase` (later
# definitions shadow earlier ones) and most locals were assigned to
# `lowerCamelCase__` while the code reads the real names (`pipe_a`,
# `output`, `image`, `mem_bytes`, ...), which are therefore unbound.
# The code is reproduced byte-identical with review notes only.
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
# if
# Stage I + Stage II pipelines share the text encoder/tokenizer;
# embeddings are precomputed so T5 can be dropped to save memory.
lowerCamelCase__ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowerCamelCase__ = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowerCamelCase__ , lowerCamelCase__ = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCamelCase__ = None
lowerCamelCase__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCamelCase__ = IFImgaImgPipeline(**pipe_a.components )
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCamelCase__ = IFInpaintingPipeline(**pipe_a.components )
lowerCamelCase__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Text-to-image: stage I (64x64) then super-resolution stage II (256x256),
# asserting peak CUDA memory bounds and pixel closeness to references.
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Img2img variant of the same two-stage check.
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Inpainting variant: adds a mask image (and the original image at stage II).
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( ) -> Union[str, Any]:
    """Reset CUDA memory bookkeeping so each stage's peak usage is measured fresh."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 274
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers; entries are added only when
# the corresponding optional dependency is installed.
# NOTE(review): the original assigned the structure and its entries to a
# mangled throwaway name, so the `_import_structure` read at the bottom was
# an unbound name; restored to the standard transformers __init__ pattern.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49
|
import os
def a ( A__ = "matrix.txt" ) -> int:
    """Project Euler 81: minimal path sum from top-left to bottom-right of a
    comma-separated grid, moving only right and down.

    NOTE(review): the path is built as os.path.join(os.path.dirname(A__), A__),
    which doubles any relative directory component — this looks mangled from
    os.path.dirname(__file__); confirm. Absolute paths and bare filenames work.
    Also fixed: locals restored (the original never bound the names it read),
    the redundant second `dp` allocation removed, and the loops generalized
    from square grids to any rectangular grid.
    """
    with open(os.path.join(os.path.dirname(A__), A__)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    rows = len(grid)
    cols = len(grid[0])
    # dp[i][j] = minimal path sum reaching cell (i, j)
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
    # NOTE(review): the original called an undefined name `solution`; the
    # implementation in this file is named `a`.
    print(f"{a() = }")
| 35
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests for BetterTransformer conversion round-trips.

    NOTE(review): both methods were mangled to the same name `snake_case_`
    (the second shadows the first); names kept to preserve the visible
    interface. Unbound locals restored, and `assertRaises` now names the
    concrete exception (the original passed an undefined name).
    """

    def snake_case_(self):
        # Convert to BetterTransformer, generate, revert, save, reload, and
        # check the reloaded vanilla model produces the same generation.
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def snake_case_(self):
        # Saving while still converted must fail; after reverting it succeeds.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 715
|
from __future__ import annotations
def lowerCAmelCase__(target, word_bank=None):
    """Return every way *target* can be constructed by concatenating words
    from *word_bank* (words may be reused).

    NOTE(review): the original signature reused one mangled parameter name
    twice (a SyntaxError) and never bound `table`/`table_size`; restored.
    """
    word_bank = word_bank or []
    # table[i] holds all combinations (in reverse word order) that build target[:i]
    table_size = len(target) + 1
    table = []
    for _ in range(table_size):
        table.append([])
    # seed value: the empty string has exactly one (empty) construction
    table[0] = [[]]
    for i in range(table_size):
        if table[i] != []:
            for word in word_bank:
                # word must match the target at this offset
                if target[i : i + len(word)] == word:
                    new_combinations = [[word, *way] for way in table[i]]
                    # every combination reaching i extends to i + len(word)
                    table[i + len(word)] += new_combinations
    # combinations are accumulated in reverse order; flip for readable output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
    # NOTE(review): the original called `all_construct`, which is not defined
    # in this file; the implementation above is named `lowerCAmelCase__`.
    print(lowerCAmelCase__("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(lowerCAmelCase__("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        lowerCAmelCase__(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 526
| 0
|
"""Depth-first topological sort of a small hard-coded DAG."""
# NOTE(review): both globals were originally assigned to the same mangled
# name (the list clobbered the dict) while the function read `edges` and
# `vertices`; restored. The recursive calls originally targeted an undefined
# `topological_sort`; they now call the function's actual name.
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def __snake_case(start, visited, sort):
    """Append vertices reachable from *start* to *sort* in topological order."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = __snake_case(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = __snake_case(vertice, visited, sort)
    return sort


if __name__ == "__main__":
    sort = __snake_case('a', [], [])
    print(sort)
| 664
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample text with non-ASCII characters used as a conversion sanity input.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# Flat configuration record with the hyperparameters both the original
# BertAbs implementation and the converted model expect.
# NOTE(review): the original assigned the logger, the sample text and this
# namedtuple all to the same mangled name, so `BertAbsConfig` (used below)
# was unbound; restored the three distinct module-level names.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
'''Convert an original BertAbs checkpoint into the transformers format and
verify that both models produce identical outputs before saving.

NOTE(review): this function is heavily name-mangled — the parameter list
reuses one name twice (a SyntaxError) and every local is assigned to
`__magic_name__` while the code reads the real names (`config`, `original`,
`new_model`, `tokenizer`, ...), which are therefore unbound. Reproduced
byte-identical pending a careful restoration against the original script.
'''
__magic_name__ = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
# NOTE(review): the lambda below reads `storage`, which its mangled
# parameters never bind — presumably `lambda storage, loc: storage`.
__magic_name__ = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage )
__magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
original.eval()
__magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__magic_name__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
__magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__magic_name__ = encoder_input_ids
__magic_name__ = decoder_input_ids
__magic_name__ = __magic_name__ = None
__magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = original.generator(lowerCamelCase_ )
__magic_name__ = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = new_model.generator(lowerCamelCase_ )
__magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were originally assigned to mangled
    # throwaway names and then read unbound; restored. The call target
    # `convert_bertabs_checkpoints` matches the real name of the conversion
    # function defined above under a mangled `def`; confirm both agree.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 664
| 1
|
"""simple docstring"""
from torch import nn
def a ( act_fn: str ) -> nn.Module:
    """Return the activation module named by *act_fn*.

    Supported names: "swish"/"silu" -> nn.SiLU, "mish" -> nn.Mish,
    "gelu" -> nn.GELU.

    Raises:
        ValueError: for any unsupported activation name.

    NOTE(review): the original parameter was mangled, leaving `act_fn`
    unbound in the body; restored.
    """
    if act_fn in ("swish", "silu"):
        return nn.SiLU()
    if act_fn == "mish":
        return nn.Mish()
    if act_fn == "gelu":
        return nn.GELU()
    raise ValueError(f'Unsupported activation function: {act_fn}')
| 213
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __A ( SCREAMING_SNAKE_CASE_ ):
    """SAM processor: wraps a SamImageProcessor and normalizes prompt
    points/labels/boxes into model-ready tensors.

    NOTE(review): both class attributes were assigned to one mangled name
    (clobbering each other) and __init__ assigned its state to throwaway
    names; restored to the attribute names ProcessorMixin and the methods
    below actually read.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        # Sentinel used to pad ragged point/label arrays.
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
def __call__( self : List[Any] , __snake_case : Tuple=None , __snake_case : Any=None , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Dict , ) -> BatchEncoding:
__magic_name__: Optional[int] = self.image_processor(
__snake_case , return_tensors=__snake_case , **__snake_case , )
# pop arguments that are not used in the foward but used nevertheless
__magic_name__: int = encoding_image_processor["""original_sizes"""]
if hasattr(__snake_case , """numpy""" ): # Checks if Torch or TF tensor
__magic_name__: Optional[Any] = original_sizes.numpy()
__magic_name__, __magic_name__, __magic_name__: Any = self._check_and_preprocess_points(
input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , )
__magic_name__: Optional[int] = self._normalize_and_convert(
__snake_case , __snake_case , input_points=__snake_case , input_labels=__snake_case , input_boxes=__snake_case , return_tensors=__snake_case , )
return encoding_image_processor
# Normalizes prompt coordinates against each image's original size and
# converts them into torch/tf tensors as requested.
# NOTE(review): the parameter list reuses mangled names (a SyntaxError) and
# locals are assigned to `__magic_name__` while the code reads the real
# names (`input_points`, `input_labels`, `input_boxes`), which are unbound
# here. Reproduced byte-identical pending a careful restoration.
def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Any=None , __snake_case : Dict=None , __snake_case : Union[str, Any]=None , __snake_case : Union[str, Any]="pt" , ) -> Any:
if input_points is not None:
# One shared original size vs. one size per point batch.
if len(__snake_case ) != len(__snake_case ):
__magic_name__: str = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] ) for point in input_points
]
else:
__magic_name__: List[str] = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case )
for point, original_size in zip(__snake_case , __snake_case )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__magic_name__, __magic_name__: Tuple = self._pad_points_and_labels(__snake_case , __snake_case )
__magic_name__: Tuple = np.array(__snake_case )
if input_labels is not None:
__magic_name__: List[Any] = np.array(__snake_case )
if input_boxes is not None:
if len(__snake_case ) != len(__snake_case ):
__magic_name__: List[str] = [
self._normalize_coordinates(self.target_size , __snake_case , original_sizes[0] , is_bounding_box=__snake_case )
for box in input_boxes
]
else:
__magic_name__: List[Any] = [
self._normalize_coordinates(self.target_size , __snake_case , __snake_case , is_bounding_box=__snake_case )
for box, original_size in zip(__snake_case , __snake_case )
]
__magic_name__: int = np.array(__snake_case )
if input_boxes is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# boxes batch size of 1 by default
__magic_name__: Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__magic_name__: List[Any] = tf.convert_to_tensor(__snake_case )
# boxes batch size of 1 by default
__magic_name__: Optional[Any] = tf.expand_dims(__snake_case , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# point batch size of 1 by default
__magic_name__: Optional[int] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__magic_name__: Dict = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
__magic_name__: Union[str, Any] = tf.expand_dims(__snake_case , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__magic_name__: Union[str, Any] = torch.from_numpy(__snake_case )
# point batch size of 1 by default
__magic_name__: Optional[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__magic_name__: Union[str, Any] = tf.convert_to_tensor(__snake_case )
# point batch size of 1 by default
__magic_name__: int = tf.expand_dims(__snake_case , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCamelCase__(self, input_points, input_labels):
    """Pad each per-image point array to the length of the longest one.

    Shorter arrays are padded with ``self.point_pad_value`` (and the matching
    label array gets the pad value appended) so the arrays can be stacked.

    NOTE(review): the original parameters were both named ``__snake_case``
    (a SyntaxError) and the body referenced undefined names; the names here
    are reconstructed from how the body uses them.
    """
    # The longest point set determines the padded length.
    expected_nb_points = max([point.shape[0] for point in input_points])
    processed_input_points = []
    for i, point in enumerate(input_points):
        if point.shape[0] != expected_nb_points:
            # Pad missing 2-D points with the sentinel pad value.
            point = np.concatenate(
                [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
            )
            # Keep the label array aligned with the padded points.
            input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
        processed_input_points.append(point)
    input_points = processed_input_points
    return input_points, input_labels
def lowerCamelCase__(self, target_size, coords, original_size, is_bounding_box=False):
    """Rescale (x, y) coordinates from `original_size` to the preprocessed size.

    Boxes (x1, y1, x2, y2) are treated as two corner points when
    `is_bounding_box` is True.

    NOTE(review): original parameters all shared one obfuscated name
    (a SyntaxError); names reconstructed from the body's usages.
    """
    old_h, old_w = original_size
    # Target (h, w) after the image processor's resize step.
    new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
    # Work on a float copy so the caller's array is not mutated.
    coords = deepcopy(coords).astype(float)
    if is_bounding_box:
        coords = coords.reshape(-1, 2, 2)
    coords[..., 0] = coords[..., 0] * (new_w / old_w)
    coords[..., 1] = coords[..., 1] * (new_h / old_h)
    if is_bounding_box:
        coords = coords.reshape(-1, 4)
    return coords
def lowerCamelCase__(self, input_points=None, input_labels=None, input_boxes=None):
    """Validate prompt inputs and convert them to lists of numpy arrays.

    Torch/TF tensors (detected via a ``.numpy`` attribute) are converted to
    nested lists first; malformed inputs raise ValueError.

    NOTE(review): original parameters shared one obfuscated name (SyntaxError);
    names reconstructed from the body.
    """
    if input_points is not None:
        if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
            input_points = input_points.numpy().tolist()
        if not isinstance(input_points, list) or not isinstance(input_points[0], list):
            raise ValueError("Input points must be a list of list of floating points.")
        input_points = [np.array(input_point) for input_point in input_points]
    else:
        input_points = None
    if input_labels is not None:
        if hasattr(input_labels, "numpy"):
            input_labels = input_labels.numpy().tolist()
        if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
            raise ValueError("Input labels must be a list of list integers.")
        input_labels = [np.array(label) for label in input_labels]
    else:
        input_labels = None
    if input_boxes is not None:
        if hasattr(input_boxes, "numpy"):
            input_boxes = input_boxes.numpy().tolist()
        if (
            not isinstance(input_boxes, list)
            or not isinstance(input_boxes[0], list)
            or not isinstance(input_boxes[0][0], list)
        ):
            raise ValueError("Input boxes must be a list of list of list of floating points.")
        # floataa in the mangled source = float32 (digits were obfuscated).
        input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
    else:
        input_boxes = None
    return input_points, input_labels, input_boxes
@property
def lowerCamelCase__(self):
    """Input names expected by this processor, deduplicated with order kept.

    NOTE(review): the original returned the undefined name ``__snake_case``;
    it can only refer to the value fetched on the previous line.
    """
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(image_processor_input_names))
def lowerCamelCase__(self, *args, **kwargs):
    """Forward all arguments to the image processor's `post_process_masks`.

    NOTE(review): the original declared ``*__snake_case, **__snake_case``
    (duplicate parameter name, a SyntaxError); renamed to args/kwargs.
    """
    return self.image_processor.post_process_masks(*args, **kwargs)
| 213
| 1
|
"""simple docstring"""
def UpperCamelCase(nfactor: int, moles: float, volume: float) -> int:
    """Convert molarity to normality, rounded to the nearest integer.

    normality = (moles / volume) * n-factor.

    NOTE(review): the original declared three parameters with one name
    (a SyntaxError); names reconstructed from the body's usages.
    """
    return round(float(moles / volume) * nfactor)
def UpperCamelCase(volume: float, moles: float, temperature: float) -> int:
    """Ideal-gas pressure P = nRT / V (R = 0.0821 L·atm/mol·K), rounded.

    NOTE(review): original parameters shared one name (SyntaxError);
    names reconstructed from the body's usages.
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def UpperCamelCase(pressure: float, moles: float, temperature: float) -> int:
    """Ideal-gas volume V = nRT / P (R = 0.0821 L·atm/mol·K), rounded.

    NOTE(review): original parameters shared one name (SyntaxError);
    names reconstructed from the body's usages.
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def UpperCamelCase(pressure: float, moles: float, volume: float) -> int:
    """Ideal-gas temperature T = PV / (nR) (R = 0.0821 L·atm/mol·K), rounded.

    NOTE(review): original parameters shared one name (SyntaxError);
    names reconstructed from the body's usages.
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 238
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): the original bases were the undefined name ``__a`` (twice);
    the two imported test mixins are the only plausible bases. Class
    attributes were all bound to one throwaway name and have been given the
    names the pipeline test mixin reads. All methods share one obfuscated
    name, so later defs shadow earlier ones — pre-existing, left unchanged.
    """

    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def __snake_case(self):
        """Return dummy super-resolution components for the pipeline."""
        return self._get_superresolution_dummy_components()

    def __snake_case(self, device, seed=0):
        """Build deterministic dummy inputs for `device`.

        NOTE(review): the original signature repeated the parameter name
        ``_A`` (a SyntaxError); `device`/`seed` reconstructed from usage.
        """
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def __snake_case(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def __snake_case(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def __snake_case(self):
        # floataa in the mangled source = float16.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def __snake_case(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def __snake_case(self):
        self._test_save_load_local()

    def __snake_case(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 238
| 1
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __SCREAMING_SNAKE_CASE(z):
    """Logistic sigmoid 1 / (1 + e^(-z)); works elementwise on numpy arrays.

    NOTE(review): the original parameter was named ``__UpperCamelCase`` while
    the body read ``z`` (a NameError); the parameter is renamed to match.
    """
    return 1 / (1 + np.exp(-z))
def __SCREAMING_SNAKE_CASE(h, y):
    """Mean binary cross-entropy between predictions `h` and labels `y`.

    NOTE(review): the original declared two parameters with one name
    (a SyntaxError); `h`/`y` reconstructed from the body's usages.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def __SCREAMING_SNAKE_CASE(x, y, weights):
    """Log-likelihood of labels `y` given features `x` under `weights`.

    NOTE(review): the original declared three parameters with one name
    (a SyntaxError); names reconstructed from the body's usages.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def __SCREAMING_SNAKE_CASE(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    alpha: learning rate; x: (n_samples, n_features); y: binary labels.
    Returns the learned weight vector theta.

    NOTE(review): the original called module helpers `sigmoid_function` and
    `cost_function`, names that do not exist in this mangled module, so the
    sigmoid and cross-entropy are inlined here; local names reconstructed
    from their usages.
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = 1 / (1 + np.exp(-z))  # sigmoid
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = 1 / (1 + np.exp(-z))
        j = (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()  # cross-entropy loss
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Train on the first two iris features; binary target: "not setosa".
    # NOTE(review): the original bound every result to one throwaway name;
    # the names below are reconstructed from how later code uses them.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    # `logistic_reg` is expected to be defined earlier in this module
    # (the def above is bound to a mangled name — pre-existing issue).
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
def __SCREAMING_SNAKE_CASE(x):
    """Predicted probability for samples `x` using the module-level `theta`.

    NOTE(review): the original computed ``np.dot(param, param)`` because both
    dot arguments were replaced by the parameter name; the second argument
    must be the global weight vector `theta`. The sigmoid is inlined because
    the module's helper is bound to a mangled name.
    """
    return 1 / (
        1 + np.exp(-np.dot(x, theta))
    )  # predicting the value of probability from the logistic regression algorithm
# Plot both classes and the p = 0.5 decision boundary of the fitted model.
# NOTE(review): the original assigned each (min, max) tuple to a single
# throwaway name and reused one name for both meshgrid axes; distinct names
# are reconstructed below.
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
# `predict_prob` is expected to be defined above (mangled name — pre-existing).
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 708
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> remote config URL.
# NOTE(review): both module names are mangled; nothing in view reads them.
__lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class __snake_case(PretrainedConfig):
    r"""Configuration for an NLLB-MoE style encoder-decoder model.

    NOTE(review): this block was heavily name-mangled — an undefined base
    class, every ``__init__`` parameter sharing one name (a SyntaxError) and
    attributes assigned to a throwaway name instead of ``self``. Names are
    reconstructed from the body's usages and the default values; confirm
    against the canonical NllbMoeConfig.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        # Model dimensions and transformer hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 379
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference script optimized with Intel Extension for PyTorch.
# NOTE(review): every assignment was bound to the throwaway name `a_` while
# later statements read the real names (`parser`, `args`, `pipe`, ...); names
# reconstructed from those usages. `torch.bfloataa` (nonexistent) restored to
# `torch.bfloat16`.
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace-friendly sample inputs for the UNet.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    # Fall back when sample-input tracing is not supported.
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
| 25
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__(unittest.TestCase):
    """Config/data helper for ChineseCLIP image-processing tests.

    NOTE(review): the original ``__init__`` repeated one obfuscated parameter
    name (a SyntaxError) and bound locals to throwaway names; names are
    reconstructed from the body's usages. Both helper methods share one
    obfuscated name, so the second shadows the first — pre-existing.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # NOTE(review): mutable list defaults kept for interface compatibility.
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def UpperCamelCase__(self):
        """Return the kwargs dict used to build an image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def UpperCamelCase__(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random test images as PIL, numpy, or torch."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                # Random (width, height) between the min and max resolution.
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class A__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor with 3-channel inputs.

    NOTE(review): the original base was the undefined name
    ``__SCREAMING_SNAKE_CASE``; the imported ImageProcessingSavingTestMixin is
    the only plausible base. The class attribute and locals were bound to
    throwaway names; names reconstructed from usages (`self.image_processing_class`
    is read by the methods below). All methods share one obfuscated name —
    pre-existing, left unchanged.
    """

    # Read by ImageProcessingSavingTestMixin and the methods below.
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCamelCase__(self):
        # setUp-style initializer: build the shared tester helper.
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def UpperCamelCase__(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__(self):
        # The processor must expose every configurable attribute.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def UpperCamelCase__(self):
        # from_dict must honor both stored values and keyword overrides.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def UpperCamelCase__(self):
        pass

    def UpperCamelCase__(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def UpperCamelCase__(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def UpperCamelCase__(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class A__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor with 4-channel inputs (RGB-converted).

    NOTE(review): the original base was the undefined name
    ``__SCREAMING_SNAKE_CASE``; attribute and local names reconstructed from
    how the methods below consume them.
    """

    # Read by ImageProcessingSavingTestMixin and the methods below.
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCamelCase__(self):
        # setUp-style initializer: 4-channel inputs collapse to 3 after RGB conversion.
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def UpperCamelCase__(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__(self):
        # The processor must expose every configurable attribute.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def UpperCamelCase__(self):
        pass

    def UpperCamelCase__(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 681
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BioGptTokenizer.

    NOTE(review): the original base was the undefined name ``__UpperCAmelCase``;
    the imported TokenizerTesterMixin is the only plausible base. The two class
    attributes were bound to one obfuscated name and have been given the names
    the mixin reads. Methods share one obfuscated name — pre-existing.
    """

    # Read by TokenizerTesterMixin.
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def SCREAMING_SNAKE_CASE(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def SCREAMING_SNAKE_CASE(self, tokenizer):
        # Input/expected-output pair used by the common tokenizer tests.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def SCREAMING_SNAKE_CASE(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def SCREAMING_SNAKE_CASE(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        # BioGPT prepends the </s> (id 2) token to every sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 712
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
def __init__(
    self,
    parent,
    batch_size=13,
    image_size=30,
    patch_size=2,
    num_channels=3,
    is_training=True,
    use_labels=True,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    type_sequence_label_size=10,
    initializer_range=0.02,
    num_labels=3,
    scope=None,
    encoder_stride=2,
):
    """Store DeiT test hyper-parameters and derive the sequence length.

    NOTE(review): the original signature repeated one obfuscated parameter
    name (a SyntaxError); names/order reconstructed from the assignments
    below and the default values. `num_labels` is accepted but not stored,
    matching the original assignment list.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.scope = scope
    self.encoder_stride = encoder_stride
    # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
    num_patches = (image_size // patch_size) ** 2
    self.seq_length = num_patches + 2
def SCREAMING_SNAKE_CASE(self):
    """Build (config, pixel_values, labels) for a test run.

    NOTE(review): locals were bound to throwaway names; reconstructed from
    the return statement and the conditional label creation.
    """
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
    config = self.get_config()
    return config, pixel_values, labels
def SCREAMING_SNAKE_CASE(self):
    """Build a DeiTConfig from the stored hyper-parameters."""
    return DeiTConfig(
        image_size=self.image_size,
        patch_size=self.patch_size,
        num_channels=self.num_channels,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        # NOTE(review): the original passed an undefined name here; False is
        # the conventional value for an encoder-only config — confirm.
        is_decoder=False,
        initializer_range=self.initializer_range,
        encoder_stride=self.encoder_stride,
    )
def SCREAMING_SNAKE_CASE(self, config, pixel_values, labels):
    """Check the base DeiTModel output shape.

    NOTE(review): parameter/local names reconstructed — the original
    signature repeated one obfuscated name (a SyntaxError).
    """
    model = DeiTModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE(self, config, pixel_values, labels):
    """Check DeiTForMaskedImageModeling reconstruction shapes (RGB and greyscale).

    Restores distinct parameter names (the original repeated
    ``__UpperCAmelCase`` — a SyntaxError) and the lost ``config.num_channels``
    assignment for the greyscale sub-test.
    """
    model = DeiTForMaskedImageModeling(config=config)
    model.to(torch_device)  # torch_device: module-level device constant — TODO confirm import
    model.eval()
    result = model(pixel_values)
    self.parent.assertEqual(
        result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

    # test greyscale images
    config.num_channels = 1
    model = DeiTForMaskedImageModeling(config)
    model.to(torch_device)
    model.eval()

    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE(self, config, pixel_values, labels):
    """Check DeiTForImageClassification logits shape (RGB and greyscale).

    Restores distinct parameter names (the original repeated
    ``__UpperCAmelCase`` — a SyntaxError) and the lost ``config.num_labels``
    / ``config.num_channels`` assignments that the mangled version reduced
    to bare ``SCREAMING_SNAKE_CASE__ = ...`` statements.
    """
    config.num_labels = self.type_sequence_label_size
    model = DeiTForImageClassification(config)
    model.to(torch_device)  # torch_device: module-level device constant — TODO confirm import
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    # test greyscale images
    config.num_channels = 1
    model = DeiTForImageClassification(config)
    model.to(torch_device)
    model.eval()

    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE(self):
    """Return ``(config, inputs_dict)`` for the common-test mixin.

    The original unpacked the triple into three targets that were all the
    same name and then returned the never-bound ``config`` / ``inputs_dict``
    (NameError); proper unpacking is restored.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
    # NOTE(review): ModelTesterMixin-style common test suite for DeiT. The
    # identifiers are machine-mangled: every method is named
    # ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones at class-body
    # execution), several signatures repeat ``__UpperCAmelCase`` for multiple
    # parameters (a SyntaxError), and many locals collapse into
    # ``SCREAMING_SNAKE_CASE__``. Comments below describe the apparent intent;
    # confirm every claim against the upstream transformers DeiT test file.

    # Model classes exercised by the common tests (empty when torch is absent).
    lowerCamelCase__ : Tuple = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    lowerCamelCase__ : Any = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # Feature switches for the common tests (presumably test_pruning /
    # test_resize_embeddings / test_head_masking or similar — TODO confirm).
    lowerCamelCase__ : Any = False
    lowerCamelCase__ : Optional[int] = False
    lowerCamelCase__ : str = False

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        # Set up the shared model tester and config tester.
        # NOTE(review): ``__UpperCAmelCase`` is undefined at this point.
        SCREAMING_SNAKE_CASE__ = DeiTModelTester(self )
        SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        # Run the shared PretrainedConfig sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
        pass

    def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        # Input embeddings are a module (the patch embedding); output
        # embeddings are either absent or a Linear head.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        # The first forward() argument must be ``pixel_values``.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
            SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
        # Delegate to the model tester's base-model shape check.
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        # Delegate to the masked-image-modeling shape check.
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        # Delegate to the image-classification shape check.
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any=False ) -> Optional[int]:
        # NOTE(review): duplicate parameter names (SyntaxError) — intended
        # signature is (self, inputs_dict, model_class, return_labels=False).
        # The teacher-distilled classifier is inference-only, so its labels
        # are stripped from the prepared inputs.
        SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
        # Training smoke test: every trainable model class must produce a
        # loss whose backward() succeeds.
        if not self.model_tester.is_training:
            return
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(__UpperCAmelCase )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.train()
            SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        # Same training smoke test with gradient checkpointing enabled.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        SCREAMING_SNAKE_CASE__ = False
        SCREAMING_SNAKE_CASE__ = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
            model.gradient_checkpointing_enable()
            model.to(__UpperCAmelCase )
            model.train()
            SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
            loss.backward()

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
        # Exercise the three sequence-classification problem types; the
        # regression case must not trigger PyTorch's broadcasting warning.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(__UpperCAmelCase ),
                    *get_values(__UpperCAmelCase ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
                    SCREAMING_SNAKE_CASE__ = problem_type["""title"""]
                    SCREAMING_SNAKE_CASE__ = problem_type["""num_labels"""]
                    SCREAMING_SNAKE_CASE__ = model_class(__UpperCAmelCase )
                    model.to(__UpperCAmelCase )
                    model.train()
                    SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
                    if problem_type["num_labels"] > 1:
                        # Multi-label targets need one column per label.
                        SCREAMING_SNAKE_CASE__ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    SCREAMING_SNAKE_CASE__ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
                        SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        # Loading the first pretrained checkpoint must succeed.
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )
def A():
    """Load the COCO cats fixture image used by the slow integration tests.

    The original bound the image to ``SCREAMING_SNAKE_CASE__`` but returned
    the never-bound name ``image`` (NameError); the binding is restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
    # NOTE(review): slow integration tests for the distilled DeiT checkpoint.
    # Identifiers are machine-mangled (``SCREAMING_SNAKE_CASE__`` swallows the
    # locals, ``__UpperCAmelCase`` is undefined where it is read) — confirm
    # against the upstream transformers test file before trusting behavior.

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        # Image processor for the distilled base checkpoint; None without vision deps.
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
        # End-to-end logits check: 1000-way head, first three logits pinned
        # to reference values within 1e-4.
        SCREAMING_SNAKE_CASE__ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            __UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = self.default_image_processor
        SCREAMING_SNAKE_CASE__ = prepare_img()
        SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        # Smoke test: fp16 + accelerate device_map="auto" inference runs.
        SCREAMING_SNAKE_CASE__ = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
        SCREAMING_SNAKE_CASE__ = self.default_image_processor
        SCREAMING_SNAKE_CASE__ = prepare_img()
        SCREAMING_SNAKE_CASE__ = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" )
        SCREAMING_SNAKE_CASE__ = inputs.pixel_values.to(__UpperCAmelCase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase )
| 616
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger for the deprecation warning path below.
UpperCamelCase : Dict = logging.get_logger(__name__)
class A__(__lowercase):
    """Deprecated alias kept for backward compatibility.

    Behaves exactly like the image-processor base class it inherits from,
    but emits a FutureWarning on construction telling callers to migrate to
    ``PerceiverImageProcessor``.
    """

    def __init__(self, *args, **kwargs):
        # The original passed the undefined name ``_A`` as the warning
        # category and reused one name for both *args and **kwargs (a
        # SyntaxError); FutureWarning matches the upstream deprecation shims.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 37
|
import torch
from torch import nn
class A_ ( nn.Module ):
    """Projected adaptive log-softmax output layer.

    NOTE(review): this appears to be the Transformer-XL style adaptive
    softmax (a small "head" cluster of frequent tokens plus tail clusters
    whose embeddings shrink by ``div_val**i``) — confirm against the
    upstream module. The signatures are machine-mangled: several methods
    declare multiple parameters all named ``_A`` (a SyntaxError), and the
    bodies read the *intended* parameter names (``n_token``, ``hidden``,
    ``labels``, ...) which are therefore unbound as written.
    """

    def __init__( self , _A , _A , _A , _A , _A=1 , _A=False) -> List[Any]:
        """Intended arguments: n_token, d_embed, d_proj, cutoffs, div_val=1,
        keep_order=False — TODO restore distinct names."""
        super().__init__()
        _UpperCAmelCase : List[str] = n_token
        _UpperCAmelCase : List[Any] = d_embed
        _UpperCAmelCase : List[str] = d_proj
        # cutoffs partition the vocabulary; the final boundary is n_token.
        _UpperCAmelCase : str = cutoffs + [n_token]
        _UpperCAmelCase : Union[str, Any] = [0] + self.cutoffs
        _UpperCAmelCase : Optional[Any] = div_val
        _UpperCAmelCase : Tuple = self.cutoffs[0]
        # One "cluster token" per tail cluster is appended to the head.
        _UpperCAmelCase : Union[str, Any] = len(self.cutoffs) - 1
        _UpperCAmelCase : Tuple = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            _UpperCAmelCase : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
            _UpperCAmelCase : int = nn.Parameter(torch.zeros(self.n_clusters))
        _UpperCAmelCase : Tuple = nn.ModuleList()
        _UpperCAmelCase : Optional[int] = nn.ParameterList()
        if div_val == 1:
            # Single embedding size: one Linear per cluster, a projection
            # only when d_proj differs from d_embed.
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A)))
                else:
                    self.out_projs.append(_A)
                self.out_layers.append(nn.Linear(_A , _A))
        else:
            # Shrinking embedding sizes: d_embed // div_val**i per cluster.
            for i in range(len(self.cutoffs)):
                _UpperCAmelCase , _UpperCAmelCase : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                _UpperCAmelCase : Any = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A)))
                self.out_layers.append(nn.Linear(_A , r_idx - l_idx))
        _UpperCAmelCase : List[str] = keep_order

    def snake_case__ ( self , _A , _A , _A , _A) -> Optional[int]:
        """Compute logits for one cluster.

        Intended arguments: hidden, weight, bias, proj. Without a projection
        this is a plain linear layer; otherwise hidden is first projected
        down to the cluster's embedding size.
        """
        if proj is None:
            _UpperCAmelCase : str = nn.functional.linear(_A , _A , bias=_A)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            _UpperCAmelCase : Any = nn.functional.linear(_A , proj.t().contiguous())
            _UpperCAmelCase : Optional[int] = nn.functional.linear(_A , _A , bias=_A)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def snake_case__ ( self , _A , _A=None , _A=False) -> List[Any]:
        """Forward pass. Intended arguments: hidden, labels=None, keep_order=False.

        With labels, returns per-token negative log-likelihoods (shifted so
        tokens < n predict n, as in language modeling); without labels,
        returns full log-probabilities over the vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            _UpperCAmelCase : List[Any] = hidden[..., :-1, :].contiguous()
            _UpperCAmelCase : Tuple = labels[..., 1:].contiguous()
            _UpperCAmelCase : Optional[int] = hidden.view(-1 , hidden.size(-1))
            _UpperCAmelCase : List[Any] = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('''Input and labels should have the same size in the batch dimension.''')
        else:
            _UpperCAmelCase : Any = hidden.view(-1 , hidden.size(-1))

        if self.n_clusters == 0:
            # Degenerate case: a single full softmax.
            _UpperCAmelCase : Dict = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            if labels is not None:
                # -100 is the ignore index; masked positions keep loss 0.
                _UpperCAmelCase : List[Any] = labels != -100
                _UpperCAmelCase : Optional[int] = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device)
                _UpperCAmelCase : List[Any] = (
                    -nn.functional.log_softmax(_A , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                _UpperCAmelCase : Any = nn.functional.log_softmax(_A , dim=-1)
        else:
            # construct weights and biases
            _UpperCAmelCase , _UpperCAmelCase : Any = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    # Slices of the single shared output layer.
                    _UpperCAmelCase , _UpperCAmelCase : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    _UpperCAmelCase : List[str] = self.out_layers[0].weight[l_idx:r_idx]
                    _UpperCAmelCase : List[str] = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    _UpperCAmelCase : Dict = self.out_layers[i].weight
                    _UpperCAmelCase : Dict = self.out_layers[i].bias
                if i == 0:
                    # Head cluster also carries one logit per tail cluster.
                    _UpperCAmelCase : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    _UpperCAmelCase : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(_A)
                biases.append(_A)
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
            _UpperCAmelCase : List[Any] = self._compute_logit(_A , _A , _A , _A)
            _UpperCAmelCase : Dict = nn.functional.log_softmax(_A , dim=1)
            if labels is None:
                _UpperCAmelCase : Union[str, Any] = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                _UpperCAmelCase : Tuple = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device)
            _UpperCAmelCase : Optional[Any] = 0
            _UpperCAmelCase : Any = [0] + self.cutoffs
            for i in range(len(_A) - 1):
                _UpperCAmelCase , _UpperCAmelCase : str = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Gather only the positions whose target falls in this cluster.
                    _UpperCAmelCase : Optional[int] = (labels >= l_idx) & (labels < r_idx)
                    _UpperCAmelCase : Optional[Any] = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    _UpperCAmelCase : int = labels.index_select(0 , _A) - l_idx
                    _UpperCAmelCase : Optional[int] = head_logprob.index_select(0 , _A)
                    _UpperCAmelCase : List[str] = hidden.index_select(0 , _A)
                else:
                    _UpperCAmelCase : Optional[int] = hidden
                if i == 0:
                    if labels is not None:
                        _UpperCAmelCase : Tuple = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
                    else:
                        _UpperCAmelCase : int = head_logprob[:, : self.cutoffs[0]]
                else:
                    # Tail cluster: chain rule — log P(cluster) + log P(token | cluster).
                    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = weights[i], biases[i], self.out_projs[i]
                    _UpperCAmelCase : Dict = self._compute_logit(_A , _A , _A , _A)
                    _UpperCAmelCase : List[Any] = nn.functional.log_softmax(_A , dim=1)
                    _UpperCAmelCase : Tuple = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        _UpperCAmelCase : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None]).squeeze(1)
                    else:
                        _UpperCAmelCase : str = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        _UpperCAmelCase : Tuple = logprob_i
                if labels is not None:
                    # Scatter the cluster's NLLs back into the flat output,
                    # preserving original token order when keep_order is set.
                    if (hasattr(self , '''keep_order''') and self.keep_order) or keep_order:
                        out.index_copy_(0 , _A , -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def snake_case__ ( self , _A) -> Dict:
        """Return full log-probabilities for every token (no labels).

        Mirrors forward()'s labels-free path: head softmax, then per-tail
        cluster softmax combined with the cluster's head log-probability.
        """
        if self.n_clusters == 0:
            _UpperCAmelCase : List[str] = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            return nn.functional.log_softmax(_A , dim=-1)
        else:
            # construct weights and biases
            _UpperCAmelCase , _UpperCAmelCase : List[str] = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    _UpperCAmelCase , _UpperCAmelCase : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    _UpperCAmelCase : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
                    _UpperCAmelCase : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    _UpperCAmelCase : Any = self.out_layers[i].weight
                    _UpperCAmelCase : List[Any] = self.out_layers[i].bias
                if i == 0:
                    _UpperCAmelCase : Any = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    _UpperCAmelCase : Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(_A)
                biases.append(_A)
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
            _UpperCAmelCase : int = self._compute_logit(_A , _A , _A , _A)
            _UpperCAmelCase : str = hidden.new_empty((head_logit.size(0), self.n_token))
            _UpperCAmelCase : List[str] = nn.functional.log_softmax(_A , dim=1)
            _UpperCAmelCase : List[str] = [0] + self.cutoffs
            for i in range(len(_A) - 1):
                _UpperCAmelCase , _UpperCAmelCase : Tuple = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    _UpperCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
                else:
                    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = weights[i], biases[i], self.out_projs[i]
                    _UpperCAmelCase : Dict = self._compute_logit(_A , _A , _A , _A)
                    _UpperCAmelCase : Optional[int] = nn.functional.log_softmax(_A , dim=1)
                    # NOTE(review): the upstream code indexes the cluster
                    # logit as ``-i`` here — verify this offset if the
                    # original names are restored.
                    _UpperCAmelCase : Tuple = head_logprob[:, -i] + tail_logprob_i
                    _UpperCAmelCase : str = logprob_i
            return out
| 485
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for configuration warnings/info messages.
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config.json URL.
UpperCamelCase__ : Dict = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class _UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the Pix2Struct text (decoder) model.

    NOTE(review): identifiers are machine-mangled — every ``__init__``
    parameter is named ``lowerCAmelCase__`` (duplicate parameter names are a
    SyntaxError) while the body reads the intended names (``vocab_size``,
    ``hidden_size``, ...). The comments describe apparent intent; confirm
    against the upstream Pix2Struct configuration file.
    """

    _A : Any = '''pix2struct_text_model'''
    # Keys ignored at inference when comparing configs.
    _A : Any = ['''past_key_values''']
    # Canonical-name aliases expected by the shared config machinery.
    _A : Optional[int] = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self : List[str] , lowerCAmelCase__ : Dict=5_0_2_4_4 , lowerCAmelCase__ : Optional[Any]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=6_4 , lowerCAmelCase__ : Tuple=2_0_4_8 , lowerCAmelCase__ : str=1_2 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Optional[int]=3_2 , lowerCAmelCase__ : Optional[Any]=1_2_8 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[Any]=1E-6 , lowerCAmelCase__ : str=1.0 , lowerCAmelCase__ : str="gelu_new" , lowerCAmelCase__ : str=0 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : str=0 , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[Any]=True , **lowerCAmelCase__ : Tuple , ):
        """Intended arguments (per the body): vocab_size=50244,
        hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128,
        dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0,
        dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False,
        pad_token_id=0, eos_token_id=1, tie_word_embeddings=False,
        is_decoder=True — TODO restore distinct names."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
        __SCREAMING_SNAKE_CASE : List[Any] = hidden_size
        __SCREAMING_SNAKE_CASE : Tuple = d_kv
        __SCREAMING_SNAKE_CASE : Any = d_ff
        __SCREAMING_SNAKE_CASE : Union[str, Any] = num_layers
        __SCREAMING_SNAKE_CASE : int = num_heads
        __SCREAMING_SNAKE_CASE : List[str] = relative_attention_num_buckets
        __SCREAMING_SNAKE_CASE : str = relative_attention_max_distance
        __SCREAMING_SNAKE_CASE : Any = dropout_rate
        __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_epsilon
        __SCREAMING_SNAKE_CASE : List[Any] = initializer_factor
        __SCREAMING_SNAKE_CASE : Dict = use_cache
        __SCREAMING_SNAKE_CASE : List[str] = eos_token_id
        __SCREAMING_SNAKE_CASE : str = decoder_start_token_id
        # for backwards compatibility
        __SCREAMING_SNAKE_CASE : Optional[int] = dense_act_fn
        super().__init__(
            pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , is_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )

    @classmethod
    def UpperCamelCase__ ( cls : Any , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : str ):
        """Load this config from a checkpoint; when the checkpoint is a
        composite pix2struct config, extract its text sub-config and warn on
        model-type mismatch."""
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            __SCREAMING_SNAKE_CASE : Optional[int] = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class _UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for the Pix2Struct vision (encoder) model.

    NOTE(review): same mangling as the text config — duplicate
    ``lowerCAmelCase__`` parameter names (SyntaxError) with the body reading
    the intended names. Confirm against the upstream file.
    """

    _A : List[Any] = '''pix2struct_vision_model'''

    def __init__( self : Tuple , lowerCAmelCase__ : Optional[int]=7_6_8 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : Tuple=2_0_4_8 , lowerCAmelCase__ : Tuple=6_4 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Dict="gelu_new" , lowerCAmelCase__ : int=1E-6 , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : Any=1E-10 , lowerCAmelCase__ : str=1.0 , lowerCAmelCase__ : Dict=4_0_9_6 , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : Dict=1_2_8 , **lowerCAmelCase__ : List[str] , ):
        """Intended arguments (per the body): hidden_size=768,
        patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
        num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6,
        dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10,
        initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128 — TODO restore distinct names."""
        super().__init__(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : int = hidden_size
        __SCREAMING_SNAKE_CASE : List[str] = patch_embed_hidden_size
        __SCREAMING_SNAKE_CASE : int = d_ff
        __SCREAMING_SNAKE_CASE : List[Any] = dropout_rate
        __SCREAMING_SNAKE_CASE : str = num_hidden_layers
        __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
        __SCREAMING_SNAKE_CASE : Any = initializer_range
        __SCREAMING_SNAKE_CASE : List[str] = initializer_factor
        __SCREAMING_SNAKE_CASE : str = attention_dropout
        __SCREAMING_SNAKE_CASE : Any = layer_norm_eps
        __SCREAMING_SNAKE_CASE : Dict = dense_act_fn
        __SCREAMING_SNAKE_CASE : Dict = seq_len
        __SCREAMING_SNAKE_CASE : List[Any] = relative_attention_num_buckets
        __SCREAMING_SNAKE_CASE : List[Any] = relative_attention_max_distance
        __SCREAMING_SNAKE_CASE : Optional[int] = d_kv

    @classmethod
    def UpperCamelCase__ ( cls : Union[str, Any] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : List[Any] ):
        """Load this config from a checkpoint; when the checkpoint is a
        composite pix2struct config, extract its vision sub-config and warn
        on model-type mismatch."""
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            __SCREAMING_SNAKE_CASE : Any = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class _UpperCamelCase ( lowerCamelCase__ ):
    """Composite Pix2Struct configuration holding a text and a vision sub-config.

    NOTE(review): mangled like its siblings — the ``__init__`` parameters all
    share one name while the body reads the intended names (``text_config``,
    ``vision_config``, ``initializer_factor``, ``initializer_range``,
    ``is_vqa``), and ``PixaStructTextConfig`` / ``PixaStructVisionConfig``
    refer to the two classes above under their original (un-mangled) names.
    """

    _A : Optional[int] = '''pix2struct'''
    # This config owns sub-configs (is_composition-style flag — TODO confirm).
    _A : Any = True

    def __init__( self : str , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[int]=1.0 , lowerCAmelCase__ : List[Any]=0.02 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Optional[int]=True , **lowerCAmelCase__ : Any , ):
        """Intended arguments (per the body): text_config=None,
        vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True."""
        super().__init__(tie_word_embeddings=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
        if text_config is None:
            __SCREAMING_SNAKE_CASE : Tuple = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
        if vision_config is None:
            __SCREAMING_SNAKE_CASE : Any = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        __SCREAMING_SNAKE_CASE : List[str] = PixaStructTextConfig(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Optional[Any] = PixaStructVisionConfig(**lowerCAmelCase__ )
        # Mirror the decoder's special-token ids at the top level.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_config.decoder_start_token_id
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.text_config.pad_token_id
        __SCREAMING_SNAKE_CASE : Optional[int] = self.text_config.eos_token_id
        __SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
        __SCREAMING_SNAKE_CASE : str = initializer_range
        # Propagate the shared initializer range into both sub-configs.
        __SCREAMING_SNAKE_CASE : Tuple = self.initializer_range
        __SCREAMING_SNAKE_CASE : Dict = self.initializer_range
        __SCREAMING_SNAKE_CASE : Union[str, Any] = is_vqa

    @classmethod
    def UpperCamelCase__ ( cls : Dict , lowerCAmelCase__ : PixaStructTextConfig , lowerCAmelCase__ : PixaStructVisionConfig , **lowerCAmelCase__ : Any ):
        """Alternate constructor from two already-built sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )

    def UpperCamelCase__ ( self : Union[str, Any] ):
        """Serialize to a plain dict, expanding both sub-configs and recording
        the composite model_type."""
        __SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__ )
        __SCREAMING_SNAKE_CASE : List[Any] = self.text_config.to_dict()
        __SCREAMING_SNAKE_CASE : Dict = self.vision_config.to_dict()
        __SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
        return output
| 178
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
UpperCamelCase__ : Any = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config.json URL.
UpperCamelCase__ : Dict = {
    '''microsoft/beit-base-patch16-224-pt22k''': (
        '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCamelCase ( lowerCamelCase__ ):
    """BEiT model configuration.

    NOTE(review): mangled — every ``__init__`` parameter is named
    ``lowerCAmelCase__`` (duplicate parameter names are a SyntaxError) while
    the body reads the intended names listed in the docstring below.
    Confirm against the upstream BEiT configuration file.
    """

    _A : Optional[int] = '''beit'''

    def __init__( self : Tuple , lowerCAmelCase__ : List[Any]=8_1_9_2 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Dict=3_0_7_2 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Any=1E-12 , lowerCAmelCase__ : Dict=2_2_4 , lowerCAmelCase__ : List[str]=1_6 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=[3, 5, 7, 1_1] , lowerCAmelCase__ : str=[1, 2, 3, 6] , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=0.4 , lowerCAmelCase__ : int=2_5_6 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Union[str, Any]=2_5_5 , **lowerCAmelCase__ : Optional[int] , ):
        """Intended arguments (per the body): vocab_size=8192,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        use_mask_token / use_absolute_position_embeddings /
        use_relative_position_bias / use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True,
        out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True, auxiliary_loss_weight=0.4,
        auxiliary_channels=256, auxiliary_num_convs=1,
        auxiliary_concat_input=False, semantic_loss_ignore_index=255 —
        TODO restore distinct names."""
        super().__init__(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : int = vocab_size
        __SCREAMING_SNAKE_CASE : int = hidden_size
        __SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
        __SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        __SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
        __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
        __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE : Dict = initializer_range
        __SCREAMING_SNAKE_CASE : Any = layer_norm_eps
        __SCREAMING_SNAKE_CASE : Dict = image_size
        __SCREAMING_SNAKE_CASE : Optional[int] = patch_size
        __SCREAMING_SNAKE_CASE : int = num_channels
        __SCREAMING_SNAKE_CASE : Tuple = use_mask_token
        __SCREAMING_SNAKE_CASE : Union[str, Any] = use_absolute_position_embeddings
        __SCREAMING_SNAKE_CASE : Optional[int] = use_relative_position_bias
        __SCREAMING_SNAKE_CASE : Optional[Any] = use_shared_relative_position_bias
        __SCREAMING_SNAKE_CASE : Optional[int] = layer_scale_init_value
        __SCREAMING_SNAKE_CASE : Any = drop_path_rate
        __SCREAMING_SNAKE_CASE : Union[str, Any] = use_mean_pooling
        # decode head attributes (semantic segmentation)
        __SCREAMING_SNAKE_CASE : Any = out_indices
        __SCREAMING_SNAKE_CASE : List[Any] = pool_scales
        # auxiliary head attributes (semantic segmentation)
        __SCREAMING_SNAKE_CASE : List[Any] = use_auxiliary_head
        __SCREAMING_SNAKE_CASE : str = auxiliary_loss_weight
        __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels
        __SCREAMING_SNAKE_CASE : str = auxiliary_num_convs
        __SCREAMING_SNAKE_CASE : int = auxiliary_concat_input
        __SCREAMING_SNAKE_CASE : Dict = semantic_loss_ignore_index
class _UpperCamelCase(lowerCamelCase__):
    """ONNX export configuration for BEiT: minimum opset and input/output spec."""

    # Minimum torch version supported for the ONNX export path.
    _A: List[str] = version.parse('''1.11''')

    @property
    def UpperCamelCase__(self):
        """Dynamic-axis mapping for the single ``pixel_values`` input."""
        axis_names = {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}
        return OrderedDict([("""pixel_values""", axis_names)])

    @property
    def UpperCamelCase__(self):
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
| 178
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture model shared by the tokenizer tests.
a__ : int = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # XLNet tokenizer test suite built on TokenizerTesterMixin.
    # Class-level knobs consumed by the mixin: the slow/fast tokenizer
    # classes under test and (presumably) test_rust_tokenizer /
    # test_sentencepiece flags — TODO confirm against the mixin.
    __lowerCAmelCase : int = XLNetTokenizer
    __lowerCAmelCase : Optional[Any] = XLNetTokenizerFast
    __lowerCAmelCase : Dict = True
    __lowerCAmelCase : List[str] = True
def __UpperCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ = XLNetTokenizer(__snake_case ,keep_accents=__snake_case )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = '''<s>'''
A_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) ,__snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) ,__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<unk>''' )
self.assertEqual(vocab_keys[1] ,'''<s>''' )
self.assertEqual(vocab_keys[-1] ,'''<eod>''' )
self.assertEqual(len(__snake_case ) ,1_0_0_6 )
def __UpperCAmelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0 )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = XLNetTokenizer(__snake_case ,keep_accents=__snake_case )
A_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
A_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
A_ = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(__snake_case ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
A_ = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = XLNetTokenizer(__snake_case ,do_lower_case=__snake_case )
A_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''▁he''', '''ll''', '''o'''] )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = XLNetTokenizer(__snake_case ,do_lower_case=__snake_case )
A_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
@slow
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
A_ = tokenizer.encode('''sequence builders''' ,add_special_tokens=__snake_case )
A_ = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=__snake_case )
A_ = tokenizer.build_inputs_with_special_tokens(__snake_case )
A_ = tokenizer.build_inputs_with_special_tokens(__snake_case ,__snake_case )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
| 188
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( SchedulerCommonTest ):
    """Tests for ``KDPMaDiscreteScheduler`` (KDPM2 discrete).

    Runs the common config checks plus full denoising loops with pinned
    numeric expectations on CPU/MPS and CUDA.
    """

    # Attributes consumed by SchedulerCommonTest.
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """Full denoising loop with v-prediction; pins sum/mean of the result."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        """Full epsilon-prediction loop; pins sum/mean of the result."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but timesteps placed on the target device up front."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 188
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a ( ModelTesterMixin , unittest.TestCase ):
    """Common model tests for ``PriorTransformer`` plus a pinned-output check
    against the ``hf-internal-testing/prior-dummy`` checkpoint."""

    # Attributes consumed by ModelTesterMixin.
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random (unseeded) inputs matching the dummy model's config."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic inputs generated from a fixed torch seed."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        seed_input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**seed_input)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class a ( unittest.TestCase ):
    """Integration tests against the pretrained Kandinsky 2.1 prior."""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """Deterministic full-size inputs generated from a fixed torch seed."""
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        seed_input = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**seed_input)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 219
|
import unittest
from transformers import DonutProcessor
A__ = '''naver-clova-ix/donut-base'''
class a ( unittest.TestCase ):
    """Tests for DonutProcessor's token-sequence -> JSON post-processing."""

    def setUp(self):
        # A__ is the module-level checkpoint name ('naver-clova-ix/donut-base').
        self.processor = DonutProcessor.from_pretrained(A__)

    def test_token2json(self):
        """A tagged token sequence converts back to the structured dict,
        including nested lists separated by <sep/>."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # NOTE(review): original called `tokenajson`, which does not exist on
        # DonutProcessor; the public method is `token2json`.
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 219
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text model.

    Stores the hyper-parameters of the text encoder/decoder; defaults match
    the Salesforce/blip-vqa-base architecture.
    """

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        # Store hyper-parameters on the config instance so they are serialized.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the ``text_config`` sub-dict when the
        checkpoint is a composite ``blip`` config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision (ViT) model."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Store hyper-parameters on the config instance so they are serialized.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the ``vision_config`` sub-dict when the
        checkpoint is a composite ``blip`` config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        # The text model cross-attends over vision features, so its
        # encoder width must track the vision hidden size.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 83
|
# Undirected demo graph (adjacency list) used by the __main__ example below.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return the shortest path from `start` to `goal` found by BFS.

    The path includes both endpoints; returns [] when no path exists
    and [start] when start == goal.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest start->target path.

    Returns 0 when start == target and -1 when either node is missing
    or unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # {start} rather than set(start): set("AB") would split a multi-char
    # node label into individual characters.
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo: shortest path and hop count from 'G' to 'D' in demo_graph.
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
| 487
| 0
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_A : Tuple = 2
class Dictionary:
    """A fairseq-style symbol dictionary mapping symbols to consecutive ids.

    The four special symbols (bos/pad/eos/unk) always occupy ids 0-3, in
    that order. Renamed from the placeholder `a`: the conversion code below
    calls `Dictionary.load(...)`.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []   # id -> symbol
        self.count = []     # id -> occurrence count
        self.indices = {}   # symbol -> id
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids fall back to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file path or file object."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add `word` with count `n`; return its id. Existing words have
        their count incremented unless `overwrite` is set."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        """Return the number of header lines to skip (none for plain dicts)."""
        return 0

    def add_from_file(self, f):
        """Add symbols from a "<symbol> <count> [flags]" dictionary file."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys for the HF tokenizer format.

    Continuation pieces lose their trailing '@@'; word-final pieces gain a
    '</w>' suffix; the four special tokens keep their original form.
    """
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
def __lowerCAmelCase ( biogpt_checkpoint_path , pytorch_dump_folder_path ) -> None:
    """Convert a fairseq BioGPT checkpoint dump into a HF Transformers model folder.

    Args:
        biogpt_checkpoint_path: directory holding ``checkpoint.pt``, ``dict.txt``
            and ``bpecodes``.
        pytorch_dump_folder_path: output directory (created if missing) that
            receives the vocab/merges files, ``config.json``,
            ``tokenizer_config.json`` and the model weights.

    Raises:
        ValueError: if any of the expected input files/dirs is missing.

    Fix over previous revision: the function declared two parameters with the
    same name (a SyntaxError) and every assignment target had collapsed to one
    temp name, leaving all later reads (``checkpoint_file``, ``chkpt``,
    ``src_dict``, ...) unbound.  The wrong ``-> Dict`` annotation (the function
    returns nothing) was also corrected.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, """checkpoint.pt""")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location="""cpu""")
    args = chkpt["""cfg"""]["""model"""]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, """dict.txt""")
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["""vocab_file"""])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    json_indent = 2  # pretty-print all emitted JSON files
    with open(src_vocab_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, """bpecodes""")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["""merges_file"""])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, """config.json""")
    model_conf = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1e-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config ("tokenizer_config.json" == transformers' TOKENIZER_CONFIG_FILE)
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, """tokenizer_config.json""")
    tokenizer_conf = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1024,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }
    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["""model"""]
    # remove unneeded keys
    ignore_keys = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight"""):
            # the LM head keeps its fairseq name, just un-prefixed
            model_state_dict["""output_projection.weight"""] = model_state_dict.pop(layer_name)
        else:
            # fairseq "decoder.*" weights map onto the HF "biogpt.*" namespace
            model_state_dict[layer_name.replace("""decoder""", """biogpt""")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save ("pytorch_model.bin" == transformers' WEIGHTS_NAME)
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, """pytorch_model.bin""")
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("""Conversion is done!""")
if __name__ == "__main__":
    # CLI entry point for the BioGPT checkpoint conversion.
    # Fix over previous revision: the parser was bound to `_A` but used as
    # `parser`, and the parsed namespace was read through the undefined name
    # `args`; the names are now consistent.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--biogpt_checkpoint_path''',
        default=None,
        type=str,
        required=True,
        help=(
            '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
            ''' bpecodes, etc.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # NOTE(review): `convert_biogpt_checkpoint_to_pytorch` is presumably the
    # conversion function defined above under a renamed identifier -- confirm.
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 709
|
def __lowerCAmelCase ( snake_case : int = 100 ) -> int:
    """Project Euler #6: difference between the square of the sum and the sum of
    the squares of the first ``snake_case`` natural numbers.

    Improvements over previous revision: uses the closed-form identities
    (O(1) instead of an O(n) loop), and the ``__main__`` guard now calls this
    function instead of the undefined name ``solution``.
    """
    # sum of i^2 for i=1..n and sum of i for i=1..n, via the standard formulas
    sum_of_squares = snake_case * (snake_case + 1) * (2 * snake_case + 1) // 6
    sum_of_ints = snake_case * (snake_case + 1) // 2
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(F"""{__lowerCAmelCase() = }""")
| 189
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCamelCase :
    """Builds tiny MaskFormer configs and random inputs, and checks the outputs of
    MaskFormerModel / MaskFormerForInstanceSegmentation.

    NOTE(review): an automated rename collapsed every ``__init__`` parameter to
    the same name (``snake_case_``) -- a SyntaxError -- and the attribute
    assignments read names (``parent``, ``batch_size``, ...) that are no longer
    bound; restore the original distinct parameter names before running.
    """
    def __init__( self : Optional[Any] , snake_case_ : Tuple , snake_case_ : str=2 , snake_case_ : str=True , snake_case_ : List[Any]=False , snake_case_ : str=10 , snake_case_ : int=3 , snake_case_ : Optional[int]=32 * 4 , snake_case_ : Union[str, Any]=32 * 6 , snake_case_ : Any=4 , snake_case_ : Dict=32 , ):
        UpperCamelCase_: Optional[int] = parent
        UpperCamelCase_: Any = batch_size
        UpperCamelCase_: int = is_training
        UpperCamelCase_: Dict = use_auxiliary_loss
        UpperCamelCase_: List[str] = num_queries
        UpperCamelCase_: Tuple = num_channels
        UpperCamelCase_: Dict = min_size
        UpperCamelCase_: Dict = max_size
        UpperCamelCase_: Tuple = num_labels
        UpperCamelCase_: Optional[int] = mask_feature_size
    def lowerCAmelCase__ ( self : Any ):
        """Create random pixel values/mask and random binary mask / class labels, plus a config."""
        UpperCamelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            snake_case_ )
        UpperCamelCase_: Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
        UpperCamelCase_: List[Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
        ).float()
        UpperCamelCase_: Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
        UpperCamelCase_: Tuple = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def lowerCAmelCase__ ( self : str ):
        """Return a minimal MaskFormerConfig (tiny Swin backbone + tiny DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Return (config, inputs_dict) for the common ModelTester machinery."""
        UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = self.prepare_config_and_inputs()
        UpperCamelCase_: Tuple = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
    def lowerCAmelCase__ ( self : List[str] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
        """Check the three hidden-state tuples have the expected lengths."""
        UpperCamelCase_: List[str] = output.encoder_hidden_states
        UpperCamelCase_: Optional[int] = output.pixel_decoder_hidden_states
        UpperCamelCase_: List[str] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
    def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : str=False ):
        """Run MaskFormerModel forward and check the output shapes/presence."""
        with torch.no_grad():
            UpperCamelCase_: Optional[int] = MaskFormerModel(config=snake_case_ )
            model.to(snake_case_ )
            model.eval()
            UpperCamelCase_: str = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
            UpperCamelCase_: Union[str, Any] = model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(snake_case_ , snake_case_ )
    def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : List[str] ):
        """Run the segmentation head with and without labels and check logits/loss shapes."""
        UpperCamelCase_: str = MaskFormerForInstanceSegmentation(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        def comm_check_on_output(snake_case_ : List[Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            UpperCamelCase_: int = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
            UpperCamelCase_: int = model(snake_case_ )
            comm_check_on_output(snake_case_ )
            UpperCamelCase_: Dict = model(
                pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
        comm_check_on_output(snake_case_ )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
    """Common model tests for MaskFormer, driven by the tester class above.

    NOTE(review): both mixin bases were renamed to the same placeholder ``_A``
    (presumably ModelTesterMixin and PipelineTesterMixin) and all class
    attributes share the clobbered name ``__UpperCamelCase`` -- restore the
    original names before running.
    """
    __UpperCamelCase : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __UpperCamelCase : List[str] = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Common-test feature flags (inputs_embeds / pruning / head-masking unsupported).
    __UpperCamelCase : Union[str, Any] = False
    __UpperCamelCase : Any = False
    __UpperCamelCase : List[Any] = False
    __UpperCamelCase : int = False
    def lowerCAmelCase__ ( self : int ):
        """Set up the model tester and the config tester."""
        UpperCamelCase_: List[str] = MaskFormerModelTester(self )
        UpperCamelCase_: List[Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
    def lowerCAmelCase__ ( self : List[str] ):
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self : Tuple ):
        """Base model forward with hidden states."""
        UpperCamelCase_, UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
    def lowerCAmelCase__ ( self : Dict ):
        """Instance segmentation head forward."""
        UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        pass
    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
    def lowerCAmelCase__ ( self : Dict ):
        pass
    @unittest.skip(reason="""MaskFormer is not a generative model""" )
    def lowerCAmelCase__ ( self : Any ):
        pass
    @unittest.skip(reason="""MaskFormer does not use token embeddings""" )
    def lowerCAmelCase__ ( self : str ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def lowerCAmelCase__ ( self : Any ):
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase__ ( self : Tuple ):
        pass
    def lowerCAmelCase__ ( self : List[str] ):
        """The forward signature's first argument must be pixel_values."""
        UpperCamelCase_, UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_: Optional[Any] = model_class(snake_case_ )
            UpperCamelCase_: Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase_: int = [*signature.parameters.keys()]
            UpperCamelCase_: Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case_ )
    @slow
    def lowerCAmelCase__ ( self : Dict ):
        """Loading a pretrained checkpoint works."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            UpperCamelCase_: List[str] = MaskFormerModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
    def lowerCAmelCase__ ( self : str ):
        """A default-config model with labels produces a loss."""
        UpperCamelCase_: Any = (self.model_tester.min_size,) * 2
        UpperCamelCase_: Optional[int] = {
            """pixel_values""": torch.randn((2, 3, *size) , device=snake_case_ ),
            """mask_labels""": torch.randn((2, 10, *size) , device=snake_case_ ),
            """class_labels""": torch.zeros(2 , 10 , device=snake_case_ ).long(),
        }
        UpperCamelCase_: Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
        UpperCamelCase_: Any = model(**snake_case_ )
        self.assertTrue(outputs.loss is not None )
    def lowerCAmelCase__ ( self : List[str] ):
        UpperCamelCase_, UpperCamelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
    def lowerCAmelCase__ ( self : int ):
        """Attention outputs are returned when requested."""
        UpperCamelCase_, UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_: Union[str, Any] = model_class(snake_case_ ).to(snake_case_ )
            UpperCamelCase_: Dict = model(**snake_case_ , output_attentions=snake_case_ )
            self.assertTrue(outputs.attentions is not None )
    def lowerCAmelCase__ ( self : Optional[int] ):
        """Training: the segmentation-head loss is backpropagatable."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCamelCase_: Optional[Any] = self.all_model_classes[1]
        UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase_: int = model_class(snake_case_ )
        model.to(snake_case_ )
        model.train()
        UpperCamelCase_: Optional[Any] = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
        loss.backward()
    def lowerCAmelCase__ ( self : Tuple ):
        """Training: gradients reach all intermediate hidden states and attentions."""
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCamelCase_: Any = self.all_model_classes[1]
        UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase_: Optional[int] = True
        UpperCamelCase_: Dict = True
        UpperCamelCase_: Dict = model_class(snake_case_ )
        model.to(snake_case_ )
        model.train()
        UpperCamelCase_: Optional[int] = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
        UpperCamelCase_: List[Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        UpperCamelCase_: Optional[int] = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        UpperCamelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        UpperCamelCase_: List[Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=snake_case_ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
lowerCamelCase_ : Optional[Any] = 1E-4
def A__ ( ) -> List[str]:
    """Return the standard COCO cats fixture image used by the integration tests."""
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests: run pretrained MaskFormer checkpoints on a fixture
    image and compare output slices against recorded reference values."""
    @cached_property
    def lowerCAmelCase__ ( self : Tuple ):
        """Image processor for the swin-small COCO checkpoint (None without vision deps)."""
        return (
            MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            if is_vision_available()
            else None
        )
    def lowerCAmelCase__ ( self : List[Any] ):
        """Check encoder/pixel-decoder/transformer-decoder output slices of the base model."""
        UpperCamelCase_: Union[str, Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(snake_case_ )
        UpperCamelCase_: Tuple = self.default_image_processor
        UpperCamelCase_: List[str] = prepare_img()
        UpperCamelCase_: Union[str, Any] = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
        UpperCamelCase_: List[str] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
        with torch.no_grad():
            UpperCamelCase_: Optional[int] = model(**snake_case_ )
        UpperCamelCase_: int = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
        UpperCamelCase_: Optional[int] = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
        UpperCamelCase_: Tuple = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case_ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
    def lowerCAmelCase__ ( self : Union[str, Any] ):
        """Check masks/class logits of the swin-small instance-segmentation head."""
        UpperCamelCase_: str = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(snake_case_ )
            .eval()
        )
        UpperCamelCase_: Dict = self.default_image_processor
        UpperCamelCase_: Optional[int] = prepare_img()
        UpperCamelCase_: int = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
        UpperCamelCase_: Tuple = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
        with torch.no_grad():
            UpperCamelCase_: Tuple = model(**snake_case_ )
        # masks_queries_logits
        UpperCamelCase_: Any = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCamelCase_: Optional[Any] = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        UpperCamelCase_: Tuple = torch.tensor(snake_case_ ).to(snake_case_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
        # class_queries_logits
        UpperCamelCase_: str = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        UpperCamelCase_: Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
    def lowerCAmelCase__ ( self : List[Any] ):
        """Check masks/class logits of the resnet101 COCO-stuff checkpoint."""
        UpperCamelCase_: int = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
            .to(snake_case_ )
            .eval()
        )
        UpperCamelCase_: str = self.default_image_processor
        UpperCamelCase_: int = prepare_img()
        UpperCamelCase_: Union[str, Any] = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
        UpperCamelCase_: List[Any] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
        with torch.no_grad():
            UpperCamelCase_: Union[str, Any] = model(**snake_case_ )
        # masks_queries_logits
        UpperCamelCase_: Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCamelCase_: List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        UpperCamelCase_: List[str] = torch.tensor(snake_case_ ).to(snake_case_ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
        # class_queries_logits
        UpperCamelCase_: Optional[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        UpperCamelCase_: Optional[int] = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case_ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
    def lowerCAmelCase__ ( self : Tuple ):
        """Batched inputs with segmentation maps produce a loss."""
        UpperCamelCase_: Optional[int] = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(snake_case_ )
            .eval()
        )
        UpperCamelCase_: Dict = self.default_image_processor
        UpperCamelCase_: Optional[Any] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
        UpperCamelCase_: Dict = inputs["""pixel_values"""].to(snake_case_ )
        UpperCamelCase_: List[str] = [el.to(snake_case_ ) for el in inputs["""mask_labels"""]]
        UpperCamelCase_: List[str] = [el.to(snake_case_ ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            UpperCamelCase_: List[str] = model(**snake_case_ )
        self.assertTrue(outputs.loss is not None )
| 548
|
import collections
import os
import re
from pathlib import Path
# Root of the transformers sources scanned by the init checks below.
lowerCamelCase_ : Optional[Any] = """src/transformers"""
# NOTE(review): every constant below is bound to the same clobbered module name
# (`lowerCamelCase_`), so each assignment overwrites the previous one, while the
# parsing functions further down read distinct `_re_*` names -- presumably an
# automated-rename artifact; restore the original names before running.
# Matches is_xxx_available()
lowerCamelCase_ : Union[str, Any] = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ : Union[str, Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase_ : Any = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ : Any = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ : List[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ : Any = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ : Tuple = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase_ : Dict = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase_ : Union[str, Any] = re.compile(r"""^\s*else:""")
def A__ ( lowerCamelCase ) -> List[Any]:
    """Return the normalized backend key ("x_and_y") for a backend-check line.

    Returns None when the line is not an `if not is_xxx_available()` check.
    """
    if _re_test_backend.search(lowerCamelCase ) is None:
        return None
    # Collect every backend mentioned on the line and join them in sorted order.
    found_backends = sorted(match[0] for match in _re_backend.findall(lowerCamelCase ))
    return "_and_".join(found_backends )
def A__ ( lowerCamelCase ) -> Union[str, Any]:
    """Parse a transformers ``__init__.py`` and return two ``{backend: [objects]}``
    dicts: one read from ``_import_structure`` and one from the TYPE_CHECKING
    section (or None for a traditional init without an import structure).

    NOTE(review): automated renaming collapsed assignment targets to
    ``UpperCamelCase_`` while later reads use the original names (``lines``,
    ``line_index``, ``objects``, ``backend``, ...), and the ``_re_*`` patterns
    are unbound at module level; restore the names before running.
    """
    with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        UpperCamelCase_: Dict = f.readlines()
    UpperCamelCase_: Tuple = 0
    while line_index < len(lowerCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lowerCamelCase ):
        return None
    # First grab the objects without a specific backend in _import_structure
    UpperCamelCase_: Optional[int] = []
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
        UpperCamelCase_: Any = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(lowerCamelCase ):
            UpperCamelCase_: str = _re_one_line_import_struct.search(lowerCamelCase ).groups()[0]
            UpperCamelCase_: Tuple = re.findall(r"""\[([^\]]+)\]""" , lowerCamelCase )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
            line_index += 1
            continue
        UpperCamelCase_: Any = _re_import_struct_key_value.search(lowerCamelCase )
        if single_line_import_search is not None:
            UpperCamelCase_: Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase ) > 0]
            objects.extend(lowerCamelCase )
        elif line.startswith(""" """ * 8 + """\"""" ):
            objects.append(line[9:-3] )
        line_index += 1
    UpperCamelCase_: Optional[Any] = {"""none""": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCamelCase_: Any = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase_: Any = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
                UpperCamelCase_: Tuple = lines[line_index]
                if _re_import_struct_add_one.search(lowerCamelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(lowerCamelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(lowerCamelCase ) is not None:
                    UpperCamelCase_: List[str] = _re_import_struct_add_many.search(lowerCamelCase ).groups()[0].split(""", """ )
                    UpperCamelCase_: str = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
                    objects.extend(lowerCamelCase )
                elif _re_between_brackets.search(lowerCamelCase ) is not None:
                    UpperCamelCase_: Tuple = _re_between_brackets.search(lowerCamelCase ).groups()[0].split(""", """ )
                    UpperCamelCase_: List[Any] = [obj[1:-1] for obj in imports if len(lowerCamelCase ) > 0]
                    objects.extend(lowerCamelCase )
                elif _re_quote_object.search(lowerCamelCase ) is not None:
                    objects.append(_re_quote_object.search(lowerCamelCase ).groups()[0] )
                elif line.startswith(""" """ * 8 + """\"""" ):
                    objects.append(line[9:-3] )
                elif line.startswith(""" """ * 12 + """\"""" ):
                    objects.append(line[13:-3] )
                line_index += 1
            UpperCamelCase_: List[str] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCamelCase_: List[str] = []
    while (
        line_index < len(lowerCamelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("""else""" )
    ):
        UpperCamelCase_: List[str] = lines[line_index]
        UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
        elif line.startswith(""" """ * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    UpperCamelCase_: Any = {"""none""": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lowerCamelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCamelCase_: str = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCamelCase_: List[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            UpperCamelCase_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
                UpperCamelCase_: Tuple = lines[line_index]
                UpperCamelCase_: Union[str, Any] = _re_import.search(lowerCamelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
                elif line.startswith(""" """ * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            UpperCamelCase_: Optional[Any] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[Any]:
def find_duplicates(lowerCamelCase ):
return [k for k, v in collections.Counter(lowerCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase_: Optional[int] = []
for key in import_dict_objects.keys():
UpperCamelCase_: List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
UpperCamelCase_: Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase_: int = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def A__ ( ) -> Tuple:
    """Walk the transformers source tree and validate every ``__init__.py``;
    raises ValueError listing all inconsistent inits.

    NOTE(review): automated renaming broke this body -- ``lowerCamelCase`` (the
    walk root, presumably the src/transformers path constant) is unbound and
    every assignment target collapsed to ``UpperCamelCase_`` while later reads
    use the original names (``fname``, ``objects``, ``errors``, ``failures``);
    restore the names before running.
    """
    UpperCamelCase_: Optional[int] = []
    for root, _, files in os.walk(lowerCamelCase ):
        if "__init__.py" in files:
            UpperCamelCase_: Union[str, Any] = os.path.join(lowerCamelCase , """__init__.py""" )
            UpperCamelCase_: Optional[int] = parse_init(lowerCamelCase )
            if objects is not None:
                UpperCamelCase_: Any = analyze_results(*lowerCamelCase )
                if len(lowerCamelCase ) > 0:
                    UpperCamelCase_: Any = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("""\n""".join(lowerCamelCase ) )
    if len(lowerCamelCase ) > 0:
        raise ValueError("""\n\n""".join(lowerCamelCase ) )
def get_transformers_submodules():
    """Return the dotted names of every (non-private) submodule of Transformers."""
    submodules = []
    # NOTE(review): PATH_TO_TRANSFORMERS is the upstream walk root — confirm defined above.
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only count top-level .py files; nested modules were handled via folders.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main `_import_structure`
# (read as IGNORE_SUBMODULES by `check_submodules` below).
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every Transformers submodule is registered in the root ``_import_structure``.

    Raises:
        ValueError: listing any submodule missing from the main init's keys.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    # NOTE(review): PATH_TO_TRANSFORMERS is the upstream source root — confirm defined above.
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            # Typo fixed: "registed" -> "registered".
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Entry point: validate every __init__.py, then the submodule registration.
    # NOTE(review): in this file as written the functions above are all bound to the
    # name `A__` (each shadowing the previous), so `check_all_inits` and
    # `check_submodules` are unbound here — confirm the original names are restored.
    check_all_inits()
    check_submodules()
| 548
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# Canonical file names for the slow vocab and the fast tokenizer serialization.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub locations of the vocab / tokenizer files for every published REALM checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            # Typo fixed: "aresolve" -> "resolve".
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            # Typo fixed: "tokenizer.jsont" -> "tokenizer.json".
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            # Typo fixed: "aresolve" -> "resolve".
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length accepted by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library),
    based on WordPiece. Behaves like a fast BERT tokenizer, plus
    :meth:`batch_encode_candidates` for encoding batches of candidate texts.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized state disagrees with the
        # requested options (mirrors BertTokenizerFast).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate lists, padding every candidate to max_length.

        Args:
            text: list (batch) of lists of candidate strings.
            **kwargs: forwarded to ``__call__``; ``text_pair`` and ``return_tensors``
                are intercepted and handled here.

        Returns:
            ``BatchEncoding`` whose fields are stacked per-example encodings.
        """
        # Always pad to max_length so every candidate has the same shape.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            # Defer tensor conversion until all candidates are collected.
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """Return 0s over the first sequence (incl. specials) and 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


# Backward-compatible alias for the previous (mangled) class name.
lowerCamelCase__ = RealmTokenizerFast
| 144
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    """Builds small RegNet configs/inputs for the Flax model test suite."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # NOTE(review): upstream derives this from the stage list — confirm
        # it should be len(hidden_sizes).
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a tiny RegNetConfig from the tester's hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run the base model and check the hidden-state output shape."""
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w): the backbone downsamples by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Run the classification head and check the logits shape."""
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( UpperCAmelCase, unittest.TestCase ):
    # NOTE(review): `UpperCAmelCase` is not defined in this module — upstream the
    # mixin here is FlaxModelTesterMixin (imported above). Confirm and restore.
    # NOTE(review): every method below is named `UpperCAmelCase_`, so each
    # definition shadows the previous one and only the last survives; the
    # original test method names (setUp, test_config, test_model, ...) need to
    # be restored for this suite to actually run. Several bodies also read names
    # (`_snake_case`, `arg_names`, `inputs_dict`, `self.all_model_classes`, ...)
    # that are never bound, and two inner defs repeat the parameter name
    # `_snake_case`, which is a SyntaxError. Code left untouched pending that fix.
    lowerCamelCase_ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    lowerCamelCase_ : int = False
    lowerCamelCase_ : str = False
    lowerCamelCase_ : Any = False
    def UpperCAmelCase_ (self : Optional[Any] ) -> None:
        """Set up the model tester and the config tester for this suite."""
        lowerCamelCase_ : str = FlaxRegNetModelTester(self )
        lowerCamelCase_ : Tuple = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
    def UpperCAmelCase_ (self : str ) -> Union[str, Any]:
        """Exercise the common config serialization round-trips."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def UpperCAmelCase_ (self : int ) -> Optional[int]:
        """No extra common config properties to test for RegNet."""
        return
    def UpperCAmelCase_ (self : int ) -> Union[str, Any]:
        """Run the base-model shape check from the model tester."""
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_snake_case )
    def UpperCAmelCase_ (self : Tuple ) -> Any:
        """Run the image-classification head check from the model tester."""
        lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_snake_case )
    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def UpperCAmelCase_ (self : str ) -> int:
        """Skipped: vision model has no inputs_embeds."""
        pass
    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def UpperCAmelCase_ (self : List[Any] ) -> List[str]:
        """Skipped: vision model has no input/output embeddings."""
        pass
    def UpperCAmelCase_ (self : Any ) -> Union[str, Any]:
        """Check every model's forward signature starts with `pixel_values`."""
        lowerCamelCase_ , lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Optional[Any] = model_class(_snake_case )
            lowerCamelCase_ : int = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ : Dict = [*signature.parameters.keys()]
            lowerCamelCase_ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _snake_case )
    def UpperCAmelCase_ (self : Tuple ) -> List[str]:
        """Check hidden_states outputs have one entry per stage plus the stem."""
        def check_hidden_states_output(_snake_case : int , _snake_case : Dict , _snake_case : Optional[Any] ):
            lowerCamelCase_ : Optional[Any] = model_class(_snake_case )
            lowerCamelCase_ : Dict = model(**self._prepare_for_class(_snake_case , _snake_case ) )
            lowerCamelCase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ : Dict = self.model_tester.num_stages
            self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
        lowerCamelCase_ , lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Dict = True
            check_hidden_states_output(_snake_case , _snake_case , _snake_case )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ : Tuple = True
            check_hidden_states_output(_snake_case , _snake_case , _snake_case )
    def UpperCAmelCase_ (self : Any ) -> Dict:
        """Check jitted and non-jitted forward passes produce matching shapes."""
        lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase_ : Tuple = self._prepare_for_class(_snake_case , _snake_case )
                lowerCamelCase_ : Dict = model_class(_snake_case )
                @jax.jit
                def model_jitted(_snake_case : Optional[Any] , **_snake_case : Tuple ):
                    return model(pixel_values=_snake_case , **_snake_case )
                with self.subTest('JIT Enabled' ):
                    lowerCamelCase_ : str = model_jitted(**_snake_case ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        lowerCamelCase_ : List[Any] = model_jitted(**_snake_case ).to_tuple()
                self.assertEqual(len(_snake_case ) , len(_snake_case ) )
                for jitted_output, output in zip(_snake_case , _snake_case ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below.

    Returns:
        A PIL ``Image`` (the original ``-> str`` annotation was wrong).
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained regnet-y-040 classifier."""

    @cached_property
    def default_image_processor(self):
        # Vision deps are optional; return None when they are unavailable.
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Check the logits shape and a slice of values against reference outputs."""
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 144
| 1
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_a : List[str] = logging.get_logger(__name__)
class __A (BaseImageProcessor):
    r"""
    Image processor that optionally rescales pixel values and symmetrically pads
    images so height and width become multiples of ``pad_size``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
        pad_size=8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (wrapper over the functional `rescale`)."""
        # Bare `rescale` resolves to the module-level function imported above.
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        """Pad bottom/right with symmetric reflection up to the next multiple of ``size``.

        NOTE(review): when a dimension is already a multiple of ``size`` this still
        pads a full extra block — confirm that matches the intended behavior.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Rescale and pad a batch of images, returning a ``BatchFeature``.

        Args:
            images: single image or list of images in any supported format.
            do_rescale / rescale_factor / do_pad / pad_size: per-call overrides of
                the instance defaults.
            return_tensors: optional tensor type for the output batch.
            data_format: channel layout of the output images.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 168
|
"""Tests for the `check_dummies` utility script."""
import os
import sys
import unittest

# Make the repo's utils/ directory importable so check_dummies can be loaded.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Templates mirroring the dummies emitted by check_dummies (indentation restored).
DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class __A (unittest.TestCase):
    """Unit tests for the helpers in utils/check_dummies.py."""

    def test_find_backend(self):
        """`find_backend` extracts the backend name(s) from an init guard line."""
        no_backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        """`read_init` groups init objects by backend."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        """`create_dummy_object` renders constants, functions and classes."""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        # Expected strings restored to 4-space indentation (they compare against
        # code generated by check_dummies itself).
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """`create_dummy_files` renders a complete per-backend dummy module."""
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 168
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# Generic element type stored by the segment tree below (read as `T` at the
# class definition, so it must be bound under that name).
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an arbitrary associative combiner."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build the tree from `arr` using combiner `fnc` (e.g. min, max, add)."""
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[1] is the root; leaves live at st[N .. 2N-1]; st[0] is unused.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill the internal nodes from the leaves upward."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element `p` (0-indexed) to `v` and refresh all its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the elements of the inclusive 0-indexed range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                # l is a right child: take it and step inward.
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                # r is a left child: take it and step inward.
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


# Backward-compatible alias for the previous (mangled) class name.
_SCREAMING_SNAKE_CASE = SegmentTree
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    # index -> new value, applied between the two validation passes.
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply the point updates and validate every range again.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 142
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCamelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCamelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCamelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCamelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCamelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Metric that executes model-generated code against test programs and reports pass@k.

    WARNING: this runs untrusted code; execution is gated behind the
    HF_ALLOW_CODE_EVAL="1" environment variable (see `_WARNING`).
    """

    def _info(self):
        # Declares the input schema: one list of candidate programs per task,
        # and one test program (string) per task.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    # NOTE: the mutable default for `k` mirrors the upstream metric's public
    # signature; it is never mutated inside the method.
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test case and estimate pass@k per task.

        Returns:
            (pass_at_k, results): a dict mapping "pass@k" to the mean estimate over
            tasks, and the raw per-task execution results.
        """
        # Executing untrusted code is opt-in only.
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # per-task counter to give each candidate a unique id
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    # Candidate program followed by its test suite.
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()  # restore candidate order via completion_id
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        # Only report pass@k where every task drew at least k samples.
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def SCREAMING_SNAKE_CASE(num_samples, num_correct, k) -> np.ndarray:
    """Estimate pass@k for each problem.

    Args:
        num_samples: total samples drawn per task — an int applied to every task,
            or an iterable with one entry per task.
        num_correct: iterable with the number of correct samples per task.
        k: the "k" in pass@k.

    Returns:
        np.ndarray with one pass@k estimate per task.
    """

    def estimator(n: int, c: int, k: int) -> float:
        # Unbiased estimator 1 - C(n-c, k)/C(n, k) from "Evaluating Large
        # Language Models Trained on Code", computed as a stable running product.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        # Broadcast a single sample count over all tasks.
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 142
| 1
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase_ = logging.get_logger(__name__)
def __magic_name__(box, width, height):
    """Scale an absolute pixel box (x0, y0, x1, y1) to the 0-1000 coordinate
    system that LayoutLM-style models expect."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def __magic_name__(image, lang, tesseract_config):
    """Apply Tesseract OCR to a document image.

    Returns:
        (words, normalized_boxes): the recognized non-empty words and their
        bounding boxes normalized to the 0-1000 coordinate system.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set => O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCamelCase__(BaseImageProcessor):
    r"""
    Image processor for LayoutLM-style document models: optionally resizes,
    rescales and normalizes document images, and optionally runs Tesseract OCR
    to extract words plus bounding boxes normalized to 0-1000.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale pixel values by the scalar `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images; per-call arguments override the defaults
        configured in `__init__`. Returns a `BatchFeature` with "pixel_values"
        (and "words"/"boxes" when OCR is applied)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 458
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is
    divisible by `divisor`, or 0 when no repunit can be (i.e. `divisor` shares
    a factor with 10)."""
    # Repunits are coprime to 10, so divisors with a factor of 2 or 5 never divide one.
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    # R(k+1) = 10 * R(k) + 1; track it modulo `divisor` until it hits 0.
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def _lowerCamelCase(limit: int = 1_000_000) -> int:
    """Project Euler 129: least odd divisor coprime to 10 whose least divisible
    repunit has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1  # only odd divisors can divide a repunit
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{_lowerCamelCase() = }")
| 626
| 0
|
"""simple docstring"""
from collections import defaultdict
def A__(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other
    (case- and whitespace-insensitive)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character, increment for the first string and decrement for the second;
    # anagrams leave every counter at exactly zero.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive demo: compare two user-supplied strings.
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = A__(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 150
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted configuration files.
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text model: an XLM-R style encoder with a
    final projection of size `project_dim`."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision model (a CLIP-style ViT)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping the nested dict when the checkpoint
        is a composite AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE_(PretrainedConfig):
    """Composite AltCLIP configuration: bundles a text config and a vision
    config plus the shared projection-head settings."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # `text_config_dict` / `vision_config_dict` are legacy kwargs that take
        # priority over `text_config` / `vision_config` when both are given.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate a composite config from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 150
| 1
|
"""simple docstring"""
from PIL import Image
def __A(image):
    """Threshold a greyscale PIL image in place at its mean intensity.

    Every pixel strictly brighter than the mean becomes 255, the rest 0.
    Returns the (mutated) input image.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    # First pass: accumulate the mean intensity over all pixels.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # Second pass: binarize every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Binarize the example image and save the result.
    image = __A(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 93
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule names are only materialized on first access.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code requires torch; expose nothing from it when torch is missing.
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so imports stay cheap at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class SCREAMING_SNAKE_CASE_(Generic[T]):
    """Least-recently-used cache of hashable keys.

    `dq_store` keeps keys in most-recent-first order; `key_reference` mirrors
    it as a set for O(1) membership checks.
    """

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # 0 (or None) means effectively unbounded.
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record an access to `x`, evicting the least recently used entry
        when the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front.
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: a capacity-4 cache; re-referring "A" refreshes it instead of duplicating.
    lru_cache = SCREAMING_SNAKE_CASE_(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 701
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure for the ByT5 tokenizer module.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so imports stay cheap at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted configuration files.
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class A(PretrainedConfig):
    """DETR model configuration (backbone, transformer encoder/decoder, and
    Hungarian-matcher / loss coefficients)."""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # A transformers backbone config and a timm backbone are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DETR config from a pre-trained backbone configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
# NOTE(review): this class shares the name `A` with the config class above and
# therefore shadows it at module scope; names kept to preserve the interface.
class A(OnnxConfig):
    """ONNX export configuration for DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 15
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the CPM-Ant tokenizer."""

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Minimal vocabulary covering the characters used by the tests below.
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        # Requires downloading the 10B checkpoint, hence @tooslow.
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 480
| 0
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted configuration files.
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for the DETR object-detection model.

    Restored from a name-mangled original: every assignment was collapsed onto
    one placeholder, so no `self.*` attribute was ever set and the duplicated
    parameter names made the signature invalid Python.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # timm-only options do not apply when a HF backbone config is used.
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DETR config from an existing backbone config."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR.

    The mangled original defined three properties all under one name, so only
    the last survived; the standard OnnxConfig property names are restored.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 705
|
'''simple docstring'''
import os
def A_ ( ) ->Any:
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + """/p022_names.txt""" ) as file:
lowercase_ = str(file.readlines()[0] )
lowercase_ = names.replace("""\"""" , """""" ).split(""",""" )
names.sort()
lowercase_ = 0
lowercase_ = 0
for i, name in enumerate(SCREAMING_SNAKE_CASE_ ):
for letter in name:
name_score += ord(SCREAMING_SNAKE_CASE_ ) - 64
total_score += (i + 1) * name_score
lowercase_ = 0
return total_score
if __name__ == "__main__":
print(solution())
| 603
| 0
|
from __future__ import annotations
# Sample input used by the __main__ demo/benchmark below (it references `arr`),
# and its hand-computed expected next-greatest-element output.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, the next strictly greater element to its right, else -1.

    O(n^2) reference implementation (the `__main__` benchmark compares it with
    the faster variants). Restored from mangled code whose locals were unbound.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same contract as `next_greatest_element_slow`, using enumerate + slices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack version of the next-greatest-element problem.

    Scans right-to-left; the stack keeps candidates in increasing order from
    top to bottom. The mangled original never wrote into `result`.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything not greater than the current element.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # `setup` was mangled away in the original, leaving the timeit calls below
    # with an undefined name.
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 669
|
def count_divisors(n: int) -> int:
    """Number of divisors of *n* via prime factorization: prod(multiplicity+1).

    Restored name — `solution()` below calls `count_divisors` explicitly.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    # Any remaining factor > 1 is a prime with multiplicity 1.
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution(limit: int = 500) -> int:
    """Project Euler 12: first triangle number with more than *limit* divisors.

    *limit* defaults to 500 (the original hard-coded threshold) but is now a
    parameter so the function can be exercised on small inputs.
    """
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i  # t_num is the i-th triangle number
        if count_divisors(t_num) > limit:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 669
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the BARTpho tokenizer package. The mangled
# original never bound `_import_structure` (read below) and dropped the
# `sys.modules[__name__]` assignment that makes the module lazy.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list, item, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion index keeping *sorted_collection* sorted.

    hi = -1 means "end of list". Restored: the mangled code never bound
    `hi`/`mid`, so it always returned `lo`.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list, item, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion index keeping *sorted_collection* sorted.

    Identical to bisect_left except ties (`<=`) move `lo` past equal items.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list, item, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* at its leftmost sorted position (in place)."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list, item, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* at its rightmost sorted position (in place)."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list, item):
    """Iterative binary search; returns an index of *item* or None.

    Restored name — the `__main__` block below calls `binary_search` directly.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list, item):
    """Binary search delegating to the stdlib `bisect` module; None if absent."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list, item, left: int, right: int):
    """Recursive binary search over [left, right]; returns index or None.

    The name is fixed by the original's own recursive calls.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo: all four locals were mangled onto one name, leaving
    # `collection`, `target` and `result` undefined below.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 655
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# The four module constants below were all bound to `__magic_name__`, so only
# the last survived; the class body reads them under their real names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols).

    Restored: the mangled version never bound `pairs`/`prev_char`, and the
    tokenizer's `bpe` method calls this helper by its real name.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (lowercases and pre-splits punctuation).

    Restored from a mangled original in which every local was bound to one
    placeholder, the method names collided, and the duplicated parameter
    names made the signatures invalid Python.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is the "#version:" header; last element is the empty
            # string after the trailing newline.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank == merge applied earlier.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE merges to *token*, with punctuation/newline pre-splitting."""
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (most frequent) pair first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]  # strip the trailing "</w>"

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> list:
        """Split *text* on whitespace and BPE-encode each chunk."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        # "@@ " marks a non-final BPE piece; dropping it rejoins the word.
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt to *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 276
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# All five module constants had been bound to `__magic_name__` (only the last
# survived); the tokenizer class reads them under their real names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# Salesforce CTRL control codes: token ids of the domain prompts the model was
# trained with.
CONTROL_CODES = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Salesforce CTRL.

    Restored from a mangled original: locals were collapsed onto one
    placeholder (leaving `word`, `pairs`, etc. undefined), all methods shared
    one name, and duplicated parameter names made signatures invalid.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # Skip the "#version:" header and the empty trailing element.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE merges to a single whitespace-delimited token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text: str) -> list:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt to *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
| 276
| 1
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__magic_name__ = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Move/replace `module.<tensor_name>` on *device*, quantizing via bitsandbytes
    when the parameter is an Int8Params/Params4bit.

    Restored from mangled code (every local was bound to `A_`, and the bnb
    attribute names `IntaParams`/`Paramsabit` do not exist in bitsandbytes).
    The real function name is fixed by the deprecation shim below.
    """
    # Recurse if needed: resolve dotted tensor names to the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        # hasattr guard: Params4bit only exists in newer bitsandbytes releases.
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by
            # transposing the weight matrix prior to quantization. Since weights are
            # saved in the correct "orientation", we skip transposing when loading.
            # (ConvaD is this file's mangled import of transformers' Conv1D.)
            if issubclass(module.source_cls, ConvaD) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Recursively swap eligible nn.Linear/Conv1D children for bitsandbytes layers.

    Returns (model, has_been_replaced). Restored: mangled attribute names like
    `llm_inta_threshold` / `LinearabitLt` do not exist on BitsAndBytesConfig /
    bitsandbytes; ConvaD is this file's mangled import of transformers' Conv1D.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: quantize eligible linear layers; warn if none found.

    `lm_head` stays in full precision by default for numerical stability.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias of `replace_with_bnb_linear`.

    The mangled original passed a placeholder where the warning *category*
    belongs; `FutureWarning` is the conventional deprecation category.
    """
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias of `set_module_quantized_tensor_to_device`."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names that must stay in full precision (tied weights + head).

    Restored from mangled code whose locals (`tied_model`, `tied_params`, ...)
    were never bound.
    """
    # Copy + tie weights inside the caller's init_empty_weights context, so
    # this has 0 cost.
    tied_model = deepcopy(model)
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # Otherwise they have an attached head: keep the last module too.
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" / ".bias" suffixes from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 27
|
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of the sympy expression string *function* by Newton-Raphson.

    *multiplicity* accelerates convergence for repeated roots. Restored name:
    the __main__ demo below calls `newton_raphson` directly.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Demo: a few classic roots found via newton_raphson (defined above).
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def _snake_case ( voltage : float , current : float , power : float ):
    """Apply the electrical power equation ``power = voltage * current``.

    Exactly one of the three arguments must be 0; that unknown quantity is
    computed from the other two and returned as a ``result(name, value)``
    namedtuple.

    Fixes: the original signature declared the same parameter name
    (``__snake_case``) three times — a SyntaxError — and bound the namedtuple
    factory to ``_lowerCamelCase`` while the branches called ``result``.

    :raises ValueError: if zero or several arguments are 0, or if power < 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Power is reported rounded to 2 decimals, always non-negative.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        # Defensive: unreachable because count(0) == 1 was already enforced.
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 88
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    """Holds the (optionally learned) unconditional embedding used for
    classifier-free sampling in the VQ-Diffusion pipeline.

    Fixes: the original ``__init__`` declared the parameter name
    ``SCREAMING_SNAKE_CASE__`` three times (a SyntaxError) and assigned the
    inputs to throwaway locals instead of ``self`` attributes; the body's
    references to ``learnable`` / ``hidden_size`` / ``length`` show the
    intended parameter names.
    """

    @register_to_config
    def __init__( self , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ):
        """Create a zero-initialized learnable embedding of shape
        (length, hidden_size) when ``learnable`` is True, otherwise store None."""
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        # Wrapping in Parameter registers the tensor with the model's state dict.
        self.embeddings = torch.nn.Parameter(embeddings )
class lowerCAmelCase_ ( snake_case__ ):
    """Text-to-image pipeline in the VQ-Diffusion style: encodes a prompt with
    CLIP, iteratively denoises discrete VQ latents with a transformer, then
    decodes them to pixels with a VQ-VAE.

    NOTE(review): the method signatures below repeat the same parameter name
    (``SCREAMING_SNAKE_CASE__``) several times — that is a SyntaxError in
    Python and looks like the residue of an automated rename; the original
    parameter names must be restored before this class can run. The two
    methods both named ``__a`` shadow each other for the same reason
    (presumably ``_encode_prompt`` and ``truncate`` originally).
    """

    # Collaborator type annotations (all written to the same name ``a_``).
    a_ :VQModel
    a_ :CLIPTextModel
    a_ :CLIPTokenizer
    a_ :TransformeraDModel
    a_ :LearnedClassifierFreeSamplingEmbeddings
    a_ :VQDiffusionScheduler

    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : VQModel , SCREAMING_SNAKE_CASE__ : CLIPTextModel , SCREAMING_SNAKE_CASE__ : CLIPTokenizer , SCREAMING_SNAKE_CASE__ : TransformeraDModel , SCREAMING_SNAKE_CASE__ : VQDiffusionScheduler , SCREAMING_SNAKE_CASE__ : LearnedClassifierFreeSamplingEmbeddings , ):
        """Register every sub-model on the pipeline (DiffusionPipeline bookkeeping)."""
        super().__init__()
        self.register_modules(
            vqvae=SCREAMING_SNAKE_CASE__ , transformer=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE__ , )

    def __a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
        """Tokenize and CLIP-encode the prompt; when classifier-free guidance is
        active, also build the unconditional embeddings and concatenate them
        in front of the prompt embeddings."""
        __a = len(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else 1
        # get prompt text embeddings
        __a = self.tokenizer(
            SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        __a = text_inputs.input_ids
        # Warn about (and drop) any tokens beyond CLIP's maximum sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            __a = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            __a = text_input_ids[:, : self.tokenizer.model_max_length]
        __a = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        __a = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
        # duplicate text embeddings for each generation per prompt
        __a = prompt_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embedding, broadcast over the batch.
                __a = self.learned_classifier_free_sampling_embeddings.embeddings
                __a = negative_prompt_embeds.unsqueeze(0 ).repeat(SCREAMING_SNAKE_CASE__ , 1 , 1 )
            else:
                # Encode an empty prompt as the unconditional input.
                __a = [""""""] * batch_size
                __a = text_input_ids.shape[-1]
                __a = self.tokenizer(
                    SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , )
                __a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                __a = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            __a = negative_prompt_embeds.shape[1]
            __a = negative_prompt_embeds.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
            __a = negative_prompt_embeds.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __a = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , ):
        """Run the full generation loop: encode the prompt, iteratively denoise
        the discrete latents with the transformer + scheduler, decode with the
        VQ-VAE, and post-process to numpy or PIL images."""
        # Normalize `prompt` to a batch size (str -> 1, list -> len).
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __a = 1
        elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __a = len(SCREAMING_SNAKE_CASE__ )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
        __a = batch_size * num_images_per_prompt
        # Classifier-free guidance is only active for guidance scales > 1.
        __a = guidance_scale > 1.0
        __a = self._encode_prompt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
        # get the initial completely masked latents unless the user supplied it
        __a = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # Start from the fully-masked token index everywhere.
            __a = self.transformer.num_vector_embeds - 1
            __a = torch.full(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    """Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            __a = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=self.device )
        __a = self.scheduler.timesteps.to(self.device )
        __a = latents
        for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
            # expand the sample if we are doing classifier free guidance
            __a = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            __a = self.transformer(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ ).sample
            if do_classifier_free_guidance:
                # Split unconditional/conditional halves and blend in log space.
                __a , __a = model_output.chunk(2 )
                __a = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE__ , dim=1 , keepdim=SCREAMING_SNAKE_CASE__ )
            # Keep only the most-probable tokens (see the truncate method below).
            __a = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # remove `log(0)`'s (`-inf`s)
            __a = model_output.clamp(-7_0 )
            # compute the previous noisy sample x_t -> x_t-1
            __a = self.scheduler.step(SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        # Decode the discrete latents back to pixel space with the VQ-VAE.
        __a = self.vqvae.config.vq_embed_dim
        __a = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        __a = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
        __a = self.vqvae.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ ).sample
        # Map from [-1, 1] to [0, 1] and to channels-last numpy layout.
        __a = (image / 2 + 0.5).clamp(0 , 1 )
        __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __a = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )

    def __a ( self : List[str] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : float ):
        """Zero out (in log space) all but the highest-probability tokens whose
        cumulative probability mass stays below the truncation rate; the single
        most likely token is always kept."""
        __a , __a = torch.sort(SCREAMING_SNAKE_CASE__ , 1 , descending=SCREAMING_SNAKE_CASE__ )
        __a = torch.exp(SCREAMING_SNAKE_CASE__ )
        __a = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        __a = torch.full_like(keep_mask[:, 0:1, :] , SCREAMING_SNAKE_CASE__ )
        __a = torch.cat((all_true, keep_mask) , dim=1 )
        __a = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original token order.
        __a = keep_mask.gather(1 , indices.argsort(1 ) )
        __a = log_p_x_0.clone()
        __a = -torch.inf # -inf = log(0)
        return rv
| 582
| 0
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : str ) ->bool:
    """Return True if ``UpperCAmelCase__`` is a valid dotted-quad IPv4 address.

    Fixes: the original body referenced an undefined name ``ip_va_address``
    and ran ``int()`` over the whole address string (which raises ValueError
    for any dotted address); it also capped octets at 254, whereas the valid
    IPv4 octet range is 0-255.
    """
    octets = UpperCAmelCase__.split(".")
    # Exactly four parts, each a non-empty digit string in the range 0-255.
    return len(octets) == 4 and all(
        octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets
    )
if __name__ == "__main__":
    # NOTE(review): `ip` and `is_ip_va_address_valid` are not defined under
    # these names in this module as shown (both locals are assigned to `A_`
    # and the validator above is `_lowerCAmelCase`); confirm the intended
    # names before running this demo.
    A_ = input().strip()
    A_ = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
    print(F'{ip} is a {valid_or_invalid} IP v4 address.')
| 498
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Fast unit test for ScoreSdeVePipeline using a tiny dummy UNet.

    NOTE(review): the bodies below pass an undefined name ``snake_case`` and
    assign results to throwaway locals (``A__``) while later reading names
    like ``model`` / ``sde_ve`` / ``image`` — residue of an automated rename;
    restore the original local names (e.g. ``torch_device``) before running.
    Both methods sharing the name ``_UpperCamelCase`` also shadow each other.
    """

    @property
    def _UpperCamelCase ( self : Dict ):
        """Build a small, seeded UNet2DModel used as the test backbone."""
        torch.manual_seed(0 )
        A__ : Optional[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    def _UpperCamelCase ( self : Optional[Any] ):
        """Run the pipeline twice (dict and tuple return paths) with the same
        seed and compare a corner slice against a hard-coded expectation."""
        A__ : List[Any] = self.dummy_uncond_unet
        A__ : Dict = ScoreSdeVeScheduler()
        A__ : str = ScoreSdeVePipeline(unet=snake_case , scheduler=snake_case )
        sde_ve.to(snake_case )
        sde_ve.set_progress_bar_config(disable=snake_case )
        A__ : Optional[int] = torch.manual_seed(0 )
        A__ : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case ).images
        A__ : List[str] = torch.manual_seed(0 )
        A__ : int = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case , return_dict=snake_case )[
            0
        ]
        A__ : int = image[0, -3:, -3:, -1]
        A__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        A__ : Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test against the google/ncsnpp-church-256 checkpoint.

    NOTE(review): as in the fast test above, ``snake_case`` is undefined and
    the ``A__`` locals do not match the names read later (``sde_ve``,
    ``image``) — restore the original names before running.
    """

    def _UpperCamelCase ( self : Tuple ):
        """Generate one 256x256 image with a fixed seed and compare a corner
        slice against a hard-coded expectation."""
        A__ : Union[str, Any] = """google/ncsnpp-church-256"""
        A__ : Tuple = UNetaDModel.from_pretrained(snake_case )
        A__ : int = ScoreSdeVeScheduler.from_pretrained(snake_case )
        A__ : List[Any] = ScoreSdeVePipeline(unet=snake_case , scheduler=snake_case )
        sde_ve.to(snake_case )
        sde_ve.set_progress_bar_config(disable=snake_case )
        A__ : Dict = torch.manual_seed(0 )
        A__ : Optional[int] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=snake_case ).images
        A__ : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A__ : Dict = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 498
| 1
|
import os
import numpy
import onnx
def __UpperCAmelCase ( a , b ):
    """Return True when protos ``a`` and ``b`` compare equal ignoring their
    ``.name`` fields; the names are restored before returning.

    Fixes: the original signature declared the same parameter name twice
    (a SyntaxError) and assigned the blanked/restored names to throwaway
    locals instead of ``a.name`` / ``b.name``, which made the save/blank/
    restore dance a no-op.
    """
    name_a = a.name
    name_b = b.name
    # Blank the names so the equality check ignores them.
    a.name = ""
    b.name = ""
    res = a == b
    # Restore the original names before returning.
    a.name = name_a
    b.name = name_b
    return res
def __UpperCAmelCase ( node_proto , name , new_name ):
    """Replace every input of ``node_proto`` equal to ``name`` with
    ``new_name``, recursing into the subgraphs of If/Loop nodes.

    Fixes: the original signature declared the same parameter name three
    times (a SyntaxError); the body's references to ``node_proto`` and
    ``name`` show the intended parameters.
    """
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # insert-then-pop replaces element i in place on a repeated field.
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    # If nodes carry then/else subgraphs; Loop nodes carry a body subgraph.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def __UpperCAmelCase ( graph_proto , name , new_name ):
    """Replace input ``name`` with ``new_name`` in every node of the graph.

    Fixes: the original signature declared the same parameter name three
    times (a SyntaxError); the body shows the intended parameters.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def __UpperCAmelCase ( model , model_without_ext , ind_to_replace ):
    """Remove duplicate initializers listed in ``ind_to_replace`` from
    ``model_without_ext`` and rewire graph inputs from each removed tensor's
    name to the kept tensor's name.

    Fixes: the original signature declared the same parameter name three
    times (a SyntaxError) and wrote ``name_i``/``name_j`` to throwaway
    locals; the body's references show the intended names.
    """
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Both model views must agree on tensor names, and the duplicate (i)
        # must come after the tensor it is replaced by (ref_i).
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> Dict:
    """Load an ONNX model, find duplicate initializer tensors, deduplicate
    them, and save the optimized model next to the original.

    NOTE(review): this body calls helpers (``_is_equal_tensor_proto``,
    ``_remove_dup_initializers_from_model``) that are not defined under those
    names in this module as shown, and several call sites reuse the single
    parameter name ``lowerCamelCase_`` where distinct values are clearly
    intended (loop bounds over ``inits``, ``dup_set.add`` of ``i``/``j``,
    the final ``onnx.save`` arguments) — residue of an automated rename.
    Confirm against the original script before use.
    """
    SCREAMING_SNAKE_CASE_ : Tuple = os.path.dirname(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : Tuple = os.path.basename(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : int = onnx.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
    SCREAMING_SNAKE_CASE_ : Any = list(model.graph.initializer )
    SCREAMING_SNAKE_CASE_ : int = set()
    SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
    SCREAMING_SNAKE_CASE_ : Dict = []
    SCREAMING_SNAKE_CASE_ : int = 0
    # Pairwise scan for equal initializer tensors (O(n^2) comparisons).
    for i in range(len(lowerCamelCase_ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(lowerCamelCase_ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(lowerCamelCase_ )
                dup_set.add(lowerCamelCase_ )
                # Estimate the duplicate's memory footprint from its dtype:
                # 1 = FLOAT, 6 = INT32 (4 bytes); 7 = INT64, 11 = DOUBLE (8 bytes).
                SCREAMING_SNAKE_CASE_ : int = inits[j].data_type
                SCREAMING_SNAKE_CASE_ : str = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , lowerCamelCase_ )
                total_reduced_size += mem_size
                SCREAMING_SNAKE_CASE_ : int = inits[i].name
                SCREAMING_SNAKE_CASE_ : List[Any] = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(lowerCamelCase_ )
                else:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = [name_j]
                # Replace j (the later duplicate) with i (the kept tensor).
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' )
    SCREAMING_SNAKE_CASE_ : int = sorted(lowerCamelCase_ )
    _remove_dup_initializers_from_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
    # Save the deduplicated model alongside the original with an `optimized_` prefix.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'optimized_' + model_file_name
    SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
    onnx.save(lowerCamelCase_ , lowerCamelCase_ )
    return new_model
| 105
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def _lowerCAmelCase ( default=None , metadata=None ):
    """Build a dataclass field whose default is produced by a factory.

    Fixes: the original declared both parameters with the same name ``A__``
    (a SyntaxError); the body's references show they were ``default`` and
    ``metadata``.

    Note: the lambda closes over ``default`` itself, so every instance
    receives the same object (upstream behavior), but dataclasses still
    require a factory for mutable defaults.
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowercase :
    '''Deprecated argument container for the Hugging Face benchmarking utilities.

    NOTE(review): every attribute below is assigned to the same identifier
    (``__SCREAMING_SNAKE_CASE``, name-mangled inside the class) so each
    definition shadows the previous one, and several ``field(default=A__ ...)``
    defaults reference a name ``A__`` that is not defined in this module as
    shown — residue of an automated rename of distinct field names and
    boolean literals. Confirm against the upstream ``BenchmarkArguments``
    class before use.
    '''
    __SCREAMING_SNAKE_CASE = list_field(
        default=[] , metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } , )
    __SCREAMING_SNAKE_CASE = list_field(
        default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
    __SCREAMING_SNAKE_CASE = list_field(
        default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Benchmark training of model"""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Verbose memory tracing"""} )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } , )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Trace memory line by line"""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save result to a CSV file"""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Save all print statements in a log file"""} )
    __SCREAMING_SNAKE_CASE = field(default=A__ , metadata={"""help""": """Whether to print environment information"""} )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } , )
    # CSV/log filenames default to timestamped names computed once at class
    # definition time (``time()`` is evaluated when the module is imported).
    __SCREAMING_SNAKE_CASE = field(
        default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
    __SCREAMING_SNAKE_CASE = field(
        default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
    __SCREAMING_SNAKE_CASE = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
    __SCREAMING_SNAKE_CASE = field(
        default=A__ , metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } , )

    def snake_case_ ( self ) -> List[str]:
        """Emit the deprecation warning for these benchmark utilities."""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , _snake_case , )

    def snake_case_ ( self ) -> Union[str, Any]:
        """Serialize the arguments to a pretty-printed JSON string."""
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def snake_case_ ( self ) -> List[str]:
        """Return the configured model list, requiring at least one entry."""
        if len(self.models ) <= 0:
            raise ValueError(
                '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
                ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
        return self.models

    @property
    def snake_case_ ( self ) -> Dict:
        """Whether multiprocess measurement is effectively enabled (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info('''Multiprocessing is currently not possible on TPU.''' )
            return False
        else:
            return True
| 254
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Unit tests for the framework-agnostic tensor helpers in
    ``transformers.utils`` (``flatten_dict``, ``transpose``, ``reshape``,
    ``squeeze``, ``expand_dims``) across numpy, torch, tf and jax.

    NOTE(review): every method is named ``_snake_case`` (each definition
    shadows the previous one) and the bodies pass a name ``A__`` that is
    never assigned, while results are bound to a repeated
    ``SCREAMING_SNAKE_CASE`` local — residue of an automated rename of
    distinct test methods and of the ``x``/tensor locals. Restore the
    original names before relying on these tests.
    '''
    def _snake_case ( self : Optional[int] ):
        # flatten_dict: nested mapping -> single level with dot-joined keys.
        SCREAMING_SNAKE_CASE = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        SCREAMING_SNAKE_CASE = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(A__ ) , A__ )
    def _snake_case ( self : List[str] ):
        # transpose on plain numpy arrays must match ndarray.transpose.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(A__ ) , x.transpose() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def _snake_case ( self : Tuple ):
        # transpose on torch tensors must match the numpy result.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def _snake_case ( self : Dict ):
        # transpose on tf constants must match the numpy result.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(transpose(A__ ) , transpose(A__ ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , transpose(A__ , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def _snake_case ( self : str ):
        # transpose on jax arrays must match the numpy result.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(transpose(A__ ) , np.asarray(transpose(A__ ) ) ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(transpose(A__ , axes=(1, 2, 0) ) , np.asarray(transpose(A__ , axes=(1, 2, 0) ) ) ) )
    def _snake_case ( self : Optional[int] ):
        # reshape on plain numpy arrays must match np.reshape.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.reshape(A__ , (4, 3) ) ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.reshape(A__ , (12, 5) ) ) )
    @require_torch
    def _snake_case ( self : List[str] ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
    @require_tf
    def _snake_case ( self : List[Any] ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , reshape(A__ , (4, 3) ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , reshape(A__ , (12, 5) ).numpy() ) )
    @require_flax
    def _snake_case ( self : Dict ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (4, 3) ) , np.asarray(reshape(A__ , (4, 3) ) ) ) )
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(reshape(A__ , (12, 5) ) , np.asarray(reshape(A__ , (12, 5) ) ) ) )
    def _snake_case ( self : Any ):
        # squeeze on plain numpy arrays must match np.squeeze.
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(A__ ) , np.squeeze(A__ ) ) )
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.squeeze(A__ , axis=2 ) ) )
    @require_torch
    def _snake_case ( self : Tuple ):
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
    @require_tf
    def _snake_case ( self : Any ):
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(squeeze(A__ ) , squeeze(A__ ).numpy() ) )
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , squeeze(A__ , axis=2 ).numpy() ) )
    @require_flax
    def _snake_case ( self : Tuple ):
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(squeeze(A__ ) , np.asarray(squeeze(A__ ) ) ) )
        SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(squeeze(A__ , axis=2 ) , np.asarray(squeeze(A__ , axis=2 ) ) ) )
    def _snake_case ( self : int ):
        # expand_dims on plain numpy arrays must match np.expand_dims.
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.expand_dims(A__ , axis=1 ) ) )
    @require_torch
    def _snake_case ( self : Tuple ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = torch.tensor(A__ )
        self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
    @require_tf
    def _snake_case ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = tf.constant(A__ )
        self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , expand_dims(A__ , axis=1 ).numpy() ) )
    @require_flax
    def _snake_case ( self : Optional[int] ):
        SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
        SCREAMING_SNAKE_CASE = jnp.array(A__ )
        self.assertTrue(np.allclose(expand_dims(A__ , axis=1 ) , np.asarray(expand_dims(A__ , axis=1 ) ) ) )
| 700
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
# Tokenizer resource file names and per-checkpoint download URLs.
# NOTE(review): all of the following constants are assigned to the same name
# ``__A`` and therefore shadow one another — presumably these were
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES before an automated rename.
__A : List[str] = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
# Maximum positional-embedding size per checkpoint.
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
    """Return a mapping from every byte value (0-255) to a printable unicode
    character, as used by byte-level BPE tokenizers.

    Fixes: the original body assigned every local to one name
    (``SCREAMING_SNAKE_CASE``) while reading ``bs``, ``cs`` and ``n`` — all
    undefined — so it raised NameError; this restores the well-known
    implementation.
    """
    # Bytes that already map to printable characters keep their own codepoint.
    bs = (
        list(range(ord("!") , ord("~") + 1 ) ) + list(range(ord("¡") , ord("¬") + 1 ) ) + list(range(ord("®") , ord("ÿ") + 1 ) )
    )
    cs = bs[:]
    n = 0
    # Remaining bytes are shifted into the 256+ range to stay printable.
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def __a ( A__ ):
    """Return the set of adjacent symbol pairs in the word ``A__`` (a string
    or tuple of symbols), as used by the BPE merge loop.

    Fixes: the original body read undefined names (``word``, ``pairs``,
    ``prev_char``) because every local had been collapsed to
    ``SCREAMING_SNAKE_CASE`` — this restores the standard implementation.
    """
    pairs = set()
    prev_char = A__[0]
    for char in A__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _SCREAMING_SNAKE_CASE ( __snake_case ):
    """Byte-level BPE tokenizer in the Blenderbot/GPT-2 style: bytes are mapped
    to printable unicode characters, words are merged via a ranked BPE merge
    table, and sequences are terminated with a single EOS token.

    NOTE(review): this block appears machine-mangled. Every assignment target
    is the placeholder ``SCREAMING_SNAKE_CASE`` while later statements read the
    originally intended names (``self.encoder``, ``bpe_merges``, ``index``,
    ``kv`` ...), and every method is named ``_snake_case`` so only the last
    ``def`` survives on the class. The code cannot run as written; the
    intended names must be restored before use.
    """

    # NOTE(review): the four class attributes below all rebind the same name,
    # so only the last assignment survives; presumably these were meant to be
    # vocab_files_names / pretrained_vocab_files_map / max_model_input_sizes /
    # model_input_names — confirm against the original tokenizer.
    lowerCamelCase__ = VOCAB_FILES_NAMES
    lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase__ = ["input_ids", "attention_mask"]

    def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ):
        # Wrap special tokens as AddedToken objects, then load the vocab
        # (JSON) and the BPE merge ranks (text, one merge per line).
        # NOTE(review): every parameter shares the name ``__lowerCamelCase``,
        # so only the last binding is visible inside the body — the distinct
        # vocab_file/merges_file/errors/bos/eos/... parameters are lost.
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
        super().__init__(
            errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
        # NOTE(review): the loads below should bind self.encoder / self.decoder /
        # self.byte_encoder / self.byte_decoder / self.bpe_ranks / self.cache —
        # the bare SCREAMING_SNAKE_CASE targets discard them.
        with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
            SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
        SCREAMING_SNAKE_CASE = errors  # how to handle errors in decoding
        SCREAMING_SNAKE_CASE = bytes_to_unicode()
        SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
        with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
            SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
        SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
        SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        SCREAMING_SNAKE_CASE = {}
        SCREAMING_SNAKE_CASE = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _snake_case ( self : Dict ):
        # Size of the base vocabulary (without added tokens).
        return len(self.encoder )

    def _snake_case ( self : int ):
        # Full vocabulary: base encoder plus tokens added after training.
        return dict(self.encoder , **self.added_tokens_encoder )

    def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
        # BPE-merge a single pre-tokenized word, with memoization in self.cache.
        # Repeatedly merges the lowest-ranked adjacent pair until no ranked
        # pair remains, then returns the merged symbols joined by spaces.
        if token in self.cache:
            return self.cache[token]
        SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
        if not pairs:
            return token
        while True:
            SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
            SCREAMING_SNAKE_CASE = []
            SCREAMING_SNAKE_CASE = 0
            while i < len(__lowerCamelCase ):
                try:
                    # Find the next occurrence of the pair's first symbol.
                    SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    SCREAMING_SNAKE_CASE = j
                if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
            SCREAMING_SNAKE_CASE = new_word
            if len(__lowerCamelCase ) == 1:
                break
            else:
                SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = word
        return word

    def _snake_case ( self : str , __lowerCamelCase : Dict ):
        # Tokenize raw text: regex pre-tokenization, byte->unicode mapping,
        # then BPE merges on each pre-token.
        SCREAMING_SNAKE_CASE = []
        for token in re.findall(self.pat , __lowerCamelCase ):
            SCREAMING_SNAKE_CASE = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
        return bpe_tokens

    def _snake_case ( self : List[str] , __lowerCamelCase : str ):
        # Token string -> vocab id (unk id when the token is unknown).
        return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )

    def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ):
        # Vocab id -> token string.
        return self.decoder.get(__lowerCamelCase )

    def _snake_case ( self : Tuple , __lowerCamelCase : str ):
        # Tokens -> text: concatenate and undo the byte->unicode mapping.
        SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
        # Persist the vocabulary (JSON) and merge ranks (text) into a directory.
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        SCREAMING_SNAKE_CASE = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        SCREAMING_SNAKE_CASE = os.path.join(
            __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
        SCREAMING_SNAKE_CASE = 0
        with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges are written in rank order; non-consecutive ranks indicate a
            # corrupted tokenizer, hence the warning below.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    SCREAMING_SNAKE_CASE = token_index
                writer.write(" ".join(__lowerCamelCase ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
        # 0/1 mask marking special tokens: [CLS] x ... x [SEP] (pair: + [SEP] y ... y [SEP]).
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase )) + [1]
        return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]

    def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
        # Token-type ids: all zeros (this model does not use segment ids).
        SCREAMING_SNAKE_CASE = [self.sep_token_id]
        SCREAMING_SNAKE_CASE = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ):
        # Optionally prepend a space so the first word merges like mid-sentence words.
        SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
            SCREAMING_SNAKE_CASE = " " + text
        return (text, kwargs)

    def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
        # Blenderbot sequences end with a single EOS and use no BOS/CLS.
        return token_ids_a + [self.eos_token_id]

    def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ):
        # Flatten a Conversation into one space-joined string, encode it, and
        # keep only the trailing model_max_length tokens.
        SCREAMING_SNAKE_CASE = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
        if len(__lowerCamelCase ) > self.model_max_length:
            SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 698
| 0
|
import numpy as np
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    """Power iteration: largest-magnitude eigenvalue and eigenvector.

    Args:
        input_matrix: square real-symmetric or complex-Hermitian matrix.
        vector: initial guess vector of matching dimension.
        error_tol: relative change in the eigenvalue at which to stop.
        max_iterations: hard cap on iterations.

    Returns:
        (eigenvalue, eigenvector); the eigenvalue is made real for Hermitian input.

    Fixes vs. the obfuscated original: the signature repeated one parameter
    name four times (SyntaxError) and all locals collapsed into ``_A``
    (NameError on ``is_complex``/``w``/``lambda_``...). Renamed to
    ``power_iteration`` to match the call in ``test_power_iteration`` below.
    """
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration( ) -> None:
    """Check power_iteration against numpy.linalg.eigh on a real-symmetric and
    a complex-Hermitian 3x3 matrix.

    Fixes vs. the obfuscated original: ``np.complexaaa`` (nonexistent dtype)
    -> ``np.complex128``; locals collapsed into ``_A`` restored; renamed to
    ``test_power_iteration`` to match the call in the ``__main__`` guard.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 27
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCamelCase:
    """Minimal stand-in for a ``requests`` response used by the download tests.

    NOTE(review): the three class attributes below all rebind the same name,
    so only the last assignment ({}) survives; presumably they were meant to
    be ``status_code`` / ``headers`` / ``cookies`` — confirm against the
    original test module.
    """
    __magic_name__ = 200
    __magic_name__ = {'Content-Length': '100'}
    __magic_name__ = {}
    def lowerCAmelCase__ ( self , **snake_case_ ):
        # Iterates the "body" as a single bytes chunk.
        # NOTE(review): ``bytes(snake_case_, 'utf-8')`` encodes the kwargs
        # dict, which raises TypeError at call time; presumably the intent was
        # to encode the module-level CONTENT constant — confirm before use.
        return [bytes(snake_case_ , 'utf-8' )]
def mock_request( *args , **kwargs ):
    """Drop-in replacement for ``requests.request``: ignores all arguments and
    returns a canned mock response.

    Fixes vs. the obfuscated original: ``*a, **a`` reused one parameter name
    (SyntaxError) and the body returned the undefined name ``MockResponse`` —
    the mock response class in this file is ``lowerCamelCase``.
    """
    return lowerCamelCase()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    """Download a str/list/dict URL spec through DownloadManager and verify the
    cached file path, its content and the side-car JSON metadata.

    Fixes vs. the obfuscated original: the signature repeated one parameter
    name (SyntaxError); pytest fixture names ``tmp_path``/``monkeypatch``
    restored; mangled ``_A`` locals renamed.
    """
    import requests

    # Route requests.request to the canned mock factory defined above.
    # NOTE(review): the module constants URL/CONTENT/HASH are mangled to one
    # name ("__A") in this file — restore them for this test to run.
    monkeypatch.setattr(requests , 'request' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    # Normalize all three spec shapes to parallel lists of paths and urls.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    """Extract a str/list/dict path spec through DownloadManager and verify the
    extracted location and content against the uncompressed fixture.

    Fixes vs. the obfuscated original: the signature repeated one parameter
    name (SyntaxError); fixture names ``xz_file``/``text_file`` restored;
    mangled ``_A`` locals renamed.
    """
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    # Normalize all three spec shapes to parallel lists of paths.
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            # Extraction cache key is the hash of the archive path (no etag).
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    """Helper: assert ``file`` (an iterable of raw jsonl byte lines) holds 4
    records with exactly the keys col_1/col_2/col_3 and ``path`` ends in .jsonl.

    Fixes vs. the obfuscated original: the signature repeated one parameter
    name (SyntaxError) and the parsed line was bound to ``_A`` while the body
    read ``item``. Renamed to ``_test_jsonl`` to match the calls in the
    iter_archive tests below.
    """
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('utf-8' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path( archive_jsonl , request ):
    """Iterate a (tar|zip) archive of two jsonl files via iter_archive and
    validate each member with ``_test_jsonl``.

    Fixes vs. the obfuscated original: duplicated parameter name (SyntaxError);
    pytest's ``request`` fixture name restored.
    """
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    """Iterate an archive that nests another jsonl archive: the outer
    iter_archive yields one member, whose file object is itself iterable as an
    archive of two jsonl files.

    Fixes vs. the obfuscated original: duplicated parameter name (SyntaxError);
    the inner iter_archive must receive the outer member's ``file`` object.
    """
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir ):
    """Iterate a directory with DownloadManager.iter_files and check the two
    expected visible files come back in order.

    Fixes vs. the obfuscated original: mangled parameter/local names restored
    (``data_dir``, ``num_file``, ``file``).
    """
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 27
| 1
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : Optional[Any] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
SCREAMING_SNAKE_CASE : List[str] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(_A ) ),
} , features=_A , )
return dataset
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_A )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
SCREAMING_SNAKE_CASE : Union[str, Any] = FILE_CONTENT
with open(_A , """w""" ) as f:
f.write(_A )
return filename
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import bza
SCREAMING_SNAKE_CASE : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
SCREAMING_SNAKE_CASE : Optional[Any] = bytes(_A , """utf-8""" )
with bza.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : Any = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
SCREAMING_SNAKE_CASE : str = bytes(_A , """utf-8""" )
with gzip.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
SCREAMING_SNAKE_CASE : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
SCREAMING_SNAKE_CASE : int = bytes(_A , """utf-8""" )
with lza.frame.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_A , """w""" ) as archive:
archive.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_A , """w""" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import lzma
SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
SCREAMING_SNAKE_CASE : int = bytes(_A , """utf-8""" )
with lzma.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import zipfile
SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
SCREAMING_SNAKE_CASE : List[str] = bytes(_A , """utf-8""" )
with zstd.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
SCREAMING_SNAKE_CASE : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_A , """w""" ) as f:
f.write(_A )
return filename
__UpperCAmelCase = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__UpperCAmelCase = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__UpperCAmelCase = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__UpperCAmelCase = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = datasets.Dataset.from_dict(_A )
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(_A ) ) as con:
SCREAMING_SNAKE_CASE : int = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_A , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE : int = csv.DictWriter(_A , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_A , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE : Tuple = csv.DictWriter(_A , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import bza
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_A , """rb""" ) as f:
SCREAMING_SNAKE_CASE : str = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(_A , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
SCREAMING_SNAKE_CASE : Dict = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_A , """wb""" ) as f:
SCREAMING_SNAKE_CASE : Any = pq.ParquetWriter(_A , schema=_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_A ) )] for k in DATA[0]} , schema=_A )
writer.write_table(_A )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE : Optional[Any] = {"""data""": DATA}
with open(_A , """w""" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(_A , """w""" ) as f:
json.dump(_A , _A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(_A , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(_A , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(_A , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(_A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(_A , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(_A ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(_A , """rb""" ) as orig_file:
with gzip.open(_A , """wb""" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(_A , """rb""" ) as orig_file:
with gzip.open(_A , """wb""" ) as zipped_file:
zipped_file.writelines(_A )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.basename(_A ) )
f.write(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.join("""nested""" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_A , """w""" ) as f:
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A ) ) )
f.write(_A , arcname=os.path.join("""main_dir""" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_A , """w""" ) as f:
f.add(_A , arcname=os.path.basename(_A ) )
f.add(_A , arcname=os.path.basename(_A ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_A , """w""" ) as f:
f.add(_A , arcname=os.path.join("""nested""" , os.path.basename(_A ) ) )
return path
@pytest.fixture(scope="session")
def __A(tmp_path_factory):
    """Session-scoped fixture: a plain text dataset file with one item per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __A(tmp_path_factory):
    """Session-scoped fixture: a second plain text dataset file (same content)."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __A(tmp_path_factory):
    """Session-scoped fixture: a text file with an unsupported `.abc` extension.

    Note: unlike the `.txt` fixtures above, this one returns a Path object, not a
    str — preserved from the original.
    """
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def __A(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive containing two text dataset files."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def __A(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive with two text files under `main_dir/`."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def __A(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive whose members carry an unsupported `.ext`."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        # arcnames deliberately use an unsupported extension to exercise fallbacks
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def __A(tmp_path_factory):
    """Session-scoped fixture: UTF-8 text file containing a Unicode line separator."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def __A():
    """Session-scoped fixture: repo-relative path of the RGB test image."""
    parts = ("tests", "features", "data", "test_image_rgb.jpg")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def __A():
    """Session-scoped fixture: repo-relative path of the 44.1 kHz WAV test file."""
    parts = ("tests", "features", "data", "test_audio_44100.wav")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def __A(image_file, tmp_path_factory):
    """Session-scoped fixture: zip archive containing the test image twice
    (second copy renamed `*2.jpg`)."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def __A(tmp_path_factory):
    """Session-scoped fixture: a data directory with visible and hidden
    files/subdirectories, for glob/filtering tests."""
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 719
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Keep the symbol bound so `slow_tokenizer_class = BigBirdTokenizer` resolves.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# File names expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class UpperCamelCase__ ( lowercase_ ):
    """Fast (Rust-backed) BigBird tokenizer built on a SentencePiece vocabulary.

    NOTE(review): the original class bound every class attribute and every method
    to a single machine-generated name, so only the last binding survived; the
    canonical `transformers` attribute/method names are restored here so the
    fast-tokenizer machinery can find them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string specials so surrounding whitespace is not stripped.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 79
| 0
|
import gc
import threading
import time
import psutil
import torch
class __lowercase :
    """Background tracker that records the peak resident-set size of this process.

    The attribute/method names (`process`, `peak_monitoring`, `cpu_memory_peak`,
    `thread`, `start`, `stop`) are taken from this class's own body references
    and from the `cpu_peak_tracker.start()` / `.stop()` call sites below.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Tight polling loop run on a daemon thread; exits when monitoring stops."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Begin monitoring on a daemon thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring and return the observed peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# NOTE(review): `PeakCPUMemory` is not defined in this file (the class above was
# renamed to `__lowercase`), and the functions below reference `cpu_peak_tracker`,
# not `A_` — this line raises NameError at import; confirm the intended names.
A_ = PeakCPUMemory()
def __UpperCAmelCase():
    """Snapshot wall time and current CPU/GPU memory, and start the CPU peak monitor.

    Returns a dict with keys "time", "cpu" (bytes) and one str(i) key per CUDA
    device (bytes currently allocated).
    """
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    # NOTE(review): the tracker instance above is bound to `A_`, not
    # `cpu_peak_tracker` — confirm the intended module-level name.
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def __UpperCAmelCase(start_measures):
    """Stop the CPU peak monitor and return deltas relative to `start_measures`.

    Times are in seconds; memory deltas are in MiB, keyed "cpu", "cpu-peak",
    str(i) and f"{i}-peak" per CUDA device.
    """
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase )-> Any:
"""simple docstring"""
print(f'{description}:' )
print(f'- Time: {measures["time"]:.2f}s' )
for i in range(torch.cuda.device_count() ):
print(f'- GPU {i} allocated: {measures[str(__lowercase )]:.2f}MiB' )
lowercase = measures[f'{i}-peak']
print(f'- GPU {i} peak: {peak:.2f}MiB' )
print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 604
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
# Module logger used by the deprecation warning below.
# NOTE(review): the `Any` annotation is evaluated at import time but `Any` is not
# imported in this module — confirm the original had `from typing import Any`.
__a : Any = logging.get_logger(__name__)
class __lowercase ( lowercase_ ):
    """Deprecated alias of DeiTImageProcessor; warns on construction.

    The original signature repeated one parameter name for both *args and
    **kwargs (a SyntaxError) and passed the positional args tuple where the
    warning category belongs; both are fixed here.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 637
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Keep the symbol bound so `slow_tokenizer_class = XLNetTokenizer` resolves.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

# XLNet has no fixed positional-embedding limit.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _lowerCamelCase (_snake_case ):
    """Fast (Rust-backed) XLNet tokenizer built on a SentencePiece vocabulary.

    NOTE(review): the original `__init__` repeated one parameter name for every
    argument (a SyntaxError) and all three overridden methods shared one name;
    the canonical `transformers` names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],  # noqa: B006 (read-only; matches upstream API)
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: XLNet puts specials at the end — `A [SEP] [CLS]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the [CLS] slot."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 713
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# The functions below read `logger` and `DEFAULT_DEVICE`; the original bound both
# values to the same throwaway name, leaving those references undefined.
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def __lowercase( __snake_case : List[str] ,__snake_case : str ,__snake_case : str ,__snake_case : int = 8 ,__snake_case : str = DEFAULT_DEVICE ,__snake_case : Tuple=False ,__snake_case : Dict="summarization" ,__snake_case : Optional[Any]=None ,**__snake_case : Any ,) -> Dict:
    """Run seq2seq generation over a list of examples and write one hypothesis
    per line to an output file; returns {"n_obs", "runtime", "seconds_per_sample"}.

    NOTE(review): this block appears machine-renamed — the signature repeats one
    parameter name (a SyntaxError) and every local was collapsed onto
    `__snake_case`, while the body still reads the original names (`fpaa`,
    `model`, `tokenizer`, `prefix`, `batch`, `dec`, `fout`, `start_time`,
    `n_obs`, `runtime`). Recover the original identifiers before running.
    """
    __snake_case = Path(__snake_case ).open('w' ,encoding='utf-8' )
    __snake_case = str(__snake_case )
    __snake_case = AutoModelForSeqaSeqLM.from_pretrained(__snake_case ).to(__snake_case )
    if fpaa:
        __snake_case = model.half()
    __snake_case = AutoTokenizer.from_pretrained(__snake_case )
    logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
    __snake_case = time.time()
    # update config with task specific params
    use_task_specific_params(__snake_case ,__snake_case )
    if prefix is None:
        # NOTE(review): result assigned to a throwaway name — `prefix` itself is never updated here.
        __snake_case = prefix or getattr(model.config ,'prefix' ,'' ) or ''
    for examples_chunk in tqdm(list(chunks(__snake_case ,__snake_case ) ) ):
        __snake_case = [prefix + text for text in examples_chunk]
        __snake_case = tokenizer(__snake_case ,return_tensors='pt' ,truncation=__snake_case ,padding='longest' ).to(__snake_case )
        __snake_case = model.generate(
            input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**__snake_case ,)
        __snake_case = tokenizer.batch_decode(__snake_case ,skip_special_tokens=__snake_case ,clean_up_tokenization_spaces=__snake_case )
        for hypothesis in dec:
            fout.write(hypothesis + '\n' )
        fout.flush()
    fout.close()
    __snake_case = int(time.time() - start_time ) # seconds
    __snake_case = len(__snake_case )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def __lowercase( ) -> Dict:
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def __lowercase( __snake_case : List[str]=True ) -> str:
    """CLI entry point: parse args, run generation over the input file, then
    optionally compute BLEU/ROUGE against a reference file and dump metrics.

    NOTE(review): this block appears machine-renamed — locals (`parser`, `args`,
    `parsed_args`, `examples`, `score_fn`, `output_lns`, `reference_lns`,
    `scores`) were collapsed onto `__snake_case`, and it calls `datetime_now` /
    `generate_summaries_or_translations`, which are not bound under those names
    in this file. Recover the original identifiers before running.
    """
    __snake_case = argparse.ArgumentParser()
    parser.add_argument('model_name' ,type=__snake_case ,help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('input_path' ,type=__snake_case ,help='like cnn_dm/test.source' )
    parser.add_argument('save_path' ,type=__snake_case ,help='where to save summaries' )
    parser.add_argument('--reference_path' ,type=__snake_case ,required=__snake_case ,help='like cnn_dm/test.target' )
    parser.add_argument('--score_path' ,type=__snake_case ,required=__snake_case ,default='metrics.json' ,help='where to save metrics' )
    parser.add_argument('--device' ,type=__snake_case ,required=__snake_case ,default=__snake_case ,help='cuda, cuda:1, cpu etc.' )
    parser.add_argument(
        '--prefix' ,type=__snake_case ,required=__snake_case ,default=__snake_case ,help='will be added to the begininng of src examples' )
    parser.add_argument('--task' ,type=__snake_case ,default='summarization' ,help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' ,type=__snake_case ,default=8 ,required=__snake_case ,help='batch size' )
    parser.add_argument(
        '--n_obs' ,type=__snake_case ,default=-1 ,required=__snake_case ,help='How many observations. Defaults to all.' )
    parser.add_argument('--fp16' ,action='store_true' )
    parser.add_argument('--dump-args' ,action='store_true' ,help='print the custom hparams with the results' )
    parser.add_argument(
        '--info' ,nargs='?' ,type=__snake_case ,const=datetime_now() ,help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ) ,)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    __snake_case , __snake_case = parser.parse_known_args()
    __snake_case = parse_numeric_n_bool_cl_kwargs(__snake_case )
    if parsed_args and verbose:
        print(f'''parsed the following generate kwargs: {parsed_args}''' )
    # T5-style models expect a leading space on each source line.
    __snake_case = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        __snake_case = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=__snake_case )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu' )
    __snake_case = generate_summaries_or_translations(
        __snake_case ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**__snake_case ,)
    if args.reference_path is None:
        return {}
    # Compute scores
    __snake_case = calculate_bleu if 'translation' in args.task else calculate_rouge
    __snake_case = [x.rstrip() for x in open(args.save_path ).readlines()]
    __snake_case = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__snake_case )]
    __snake_case = score_fn(__snake_case ,__snake_case )
    scores.update(__snake_case )
    if args.dump_args:
        scores.update(__snake_case )
    if args.info:
        __snake_case = args.info
    if verbose:
        print(__snake_case )
    if args.score_path is not None:
        json.dump(__snake_case ,open(args.score_path ,'w' ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` is not defined in this file — the entry point
    # above was renamed to `__lowercase`; this call raises NameError as written.
    run_generate(verbose=True)
| 345
| 0
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module logger for the feature extractor below.
a_ = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case ):
    """Audio feature extractor: converts raw waveforms into log-mel spectrogram
    patches plus an `audio_mask` attention mask.

    NOTE(review): this block appears machine-renamed. Both `__init__` and
    `__call__` repeat one parameter name (`UpperCamelCase_`) many times — a
    SyntaxError in Python — and the `__lowercase : ... = ...` lines in
    `__init__` were presumably `self.<attr> = ...` originally (the methods read
    `self.spectrogram_length`, `self.n_fft`, `self.hop_length`,
    `self.mel_filters`, `self.patch_size`, `self.freq_len`, `self.feature_size`,
    `self.padding_value`, none of which are ever set as written). Recover the
    original identifiers before use.
    """

    # Keys produced by __call__.
    UpperCamelCase =["audio_values", "audio_mask"]
    def __init__( self , UpperCamelCase_=20_48 , UpperCamelCase_=1 , UpperCamelCase_=[16, 16] , UpperCamelCase_=1_28 , UpperCamelCase_=4_41_00 , UpperCamelCase_=86 , UpperCamelCase_=20_48 , UpperCamelCase_=0.0 , **UpperCamelCase_ , ) -> Dict:
        super().__init__(
            feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ , )
        __lowercase : str = spectrogram_length
        __lowercase : int = num_channels
        __lowercase : List[str] = patch_size
        __lowercase : Any = feature_size // self.patch_size[1]
        __lowercase : int = n_fft
        __lowercase : Optional[int] = sampling_rate // hop_length_to_sampling_rate
        __lowercase : int = sampling_rate
        __lowercase : Optional[Any] = padding_value
        # Slaney-normalized mel filter bank, transposed for the spectrogram call.
        __lowercase : List[Any] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase_ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ).T
    def _lowerCamelCase ( self , UpperCamelCase_ ) -> np.ndarray:
        # Waveform -> dB-scaled log-mel spectrogram, last frame dropped,
        # shifted by -20 dB and rescaled to roughly [-1, 1].
        __lowercase : List[str] = spectrogram(
            UpperCamelCase_ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=8_0.0 , )
        __lowercase : int = log_spec[:, :-1]
        __lowercase : Dict = log_spec - 2_0.0
        __lowercase : List[str] = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , **UpperCamelCase_ , ) -> BatchFeature:
        # Validate the caller-provided sampling rate against the extractor's.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    F""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Normalize the input into a batch of float32 column waveforms.
        __lowercase : Any = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        __lowercase : Tuple = is_batched_numpy or (
            isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __lowercase : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
            __lowercase : List[str] = np.asarray(UpperCamelCase_ , dtype=np.floataa )
        elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __lowercase : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __lowercase : Any = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        __lowercase : Optional[int] = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , UpperCamelCase_ ):
            __lowercase : List[Any] = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        __lowercase : Optional[int] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding patches.
            __lowercase : str = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            __lowercase : Tuple = np.array(UpperCamelCase_ ).astype(np.floataa )
        # convert into correct format for padding
        __lowercase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        __lowercase : Optional[Any] = np.ones([len(UpperCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        __lowercase : int = padded_audio_features * self.padding_value
        for i in range(len(UpperCamelCase_ ) ):
            __lowercase : Tuple = audio_features[i]
            __lowercase : int = feature
        # return as BatchFeature
        if return_attention_mask:
            __lowercase : List[str] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            __lowercase : Union[str, Any] = {'''audio_values''': padded_audio_features}
        __lowercase : Any = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
        return encoded_inputs
| 76
|
from __future__ import annotations
def _a ( UpperCamelCase_ : list[int] , UpperCamelCase_ : int ) -> list[int]:
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(UpperCamelCase_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCAmelCase__ = i + 1
else:
lowerCAmelCase__ = j - 1
return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The two-sum function in this module is named `_a`; the original called the
    # undefined name `two_pointer` here, raising NameError.
    print(f"{_a([2, 7, 11, 15], 9) = }")
| 339
| 0
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Any:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def __snake_case ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """Metaclass that collects methods carrying a `handle_key` attribute into a
    per-class `key_handler` dispatch table and attaches a `handle_input` hook.

    The original duplicated the `__new__` parameter name (a SyntaxError) and
    referenced the class as `KeyHandler`, which is not bound in this file;
    names are recovered from the body's own references (`attrs.values()`,
    `handled_keys`, `handle_input`).
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", lowerCAmelCase__.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # remember which key fired before invoking the handler
            # NOTE(review): the original assigned `char` to a throwaway name here;
            # `current_selection` matches the accelerate menu convention — confirm.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def __snake_case(cls):
    """Re-create `cls` through the key-handler metaclass above so its decorated
    methods are registered for dispatch.

    The original called the undefined name `KeyHandler`; the metaclass in this
    file is bound to `lowerCAmelCase__`.
    """
    return lowerCAmelCase__(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 711
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for StableDiffusionInpaintPipeline built from tiny dummy components.

    NOTE(review): restored from a mangled original in which all five pipeline-test
    attributes were assigned to the same name ``_lowerCamelCase`` (each clobbering
    the previous) and all four methods were named ``__snake_case`` (shadowed), so
    neither the tester mixins nor unittest could find them.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so the pipeline runs in milliseconds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-image-latent + 1 mask channel
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # reconstructed: SD2-style UNet -- TODO confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            # torch.Generator is not supported on MPS; fall back to the global RNG.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the stabilityai/stable-diffusion-2-inpainting checkpoint.

    NOTE(review): restored from a mangled original in which all four methods were
    named ``__snake_case`` (shadowed, so unittest ran at most one), ``torch.floataa``
    stood for ``torch.float16``, and ``a__`` stood for ``None``/``torch_device``.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 accumulates error; tolerance is deliberately loose.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 570
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): the original assigned both values to the same mangled name
# ``_lowercase``, clobbering the logger that the config classes below call.
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL (docs/tooling convention).
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder (ViT).

    NOTE(review): reconstructed from a name-mangled original: the class was named
    ``lowerCamelCase__`` (shared by three classes in this module, so only the last
    survived), the base ``A__`` was undefined, every ``__init__`` parameter shared
    the name ``__a`` (a SyntaxError), and no ``self.*`` attribute was ever set.
    The name ``BlipaVisionConfig`` is the spelling this module itself references
    from its composite config class.
    """

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Q-Former (BERT-like querying transformer).

    NOTE(review): reconstructed from a name-mangled original (duplicate ``__a``
    parameters, undefined base ``A__``, no instance attributes set); the name is
    the spelling referenced by the composite config class in this module.
    """

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    """Composite BLIP-2 configuration: vision encoder + Q-Former + language model.

    NOTE(review): reconstructed from a name-mangled original (duplicate ``__a``
    parameters, undefined base ``A__``, sub-configs built from the wrong kwargs
    dict, no instance attributes set). The sub-config class names match the
    spellings this block already referenced (``BlipaVisionConfig`` /
    ``BlipaQFormerConfig``).
    """

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        # The language model defaults to OPT when the dict does not say otherwise.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over vision features, so its encoder width
        # must track the vision hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config,
        qformer_config,
        text_config,
        **kwargs,
    ):
        """Alternate constructor from three already-built sub-config objects."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config into a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 306
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print atomically across processes by holding an exclusive flock on this file.

    Fixes the mangled original, which tried to ``open()`` the ``*args`` tuple and
    then flock that same tuple: the lock target is this script's own file, and it
    is the file handle that gets locked and (in ``finally``) unlocked. Renamed to
    ``printflock`` — the name the script body below actually calls.
    """
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


# Backward-compatible alias for the original (mangled) name.
__lowerCAmelCase = printflock
# NOTE(review): the original assigned every value to the same mangled name
# ``_lowercase`` while the following lines read ``local_rank``/``device``/
# ``hostname``/``gpu``/``rank``/``world_size`` (NameError at runtime).
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 306
| 1
|
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd of two non-negative integers.

    Fixes the mangled original, which declared both parameters as ``_A`` (a
    SyntaxError) and recursed on ``(_A, x % y)``; the Euclidean step is
    ``gcd(y, x % y)``.
    """
    return x if y == 0 else greatest_common_divisor(y, x % y)


# Backward-compatible alias for the original (mangled) name.
__UpperCamelCase = greatest_common_divisor
def lcm(x: int, y: int) -> int:
    """Least common multiple of two positive integers via the gcd identity.

    Fixes the mangled original (duplicate ``_A`` parameters, call into a
    sibling whose name was also mangled away); uses ``math.gcd`` locally so
    the function is self-contained.
    """
    from math import gcd  # local import keeps the module's import block untouched

    return (x * y) // gcd(x, y)


# Backward-compatible alias for the original (mangled) name.
__UpperCamelCase = lcm
def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n.

    >>> solution(10)
    2520
    """
    from math import gcd  # local import keeps the module's import block untouched

    g = 1
    for i in range(1, n + 1):
        # running lcm: lcm(g, i) = g * i // gcd(g, i)
        g = g * i // gcd(g, i)
    return g


# Backward-compatible alias for the original (mangled) name.
__UpperCamelCase = solution
if __name__ == "__main__":
    # Debug f-string (Py3.8+): prints "solution() = <value>".
    print(f"""{solution() = }""")
| 700
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the original assigned both values to the same mangled name
# ``UpperCamelCase__``, clobbering the module logger.
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL (docs/tooling convention).
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _lowercase(PretrainedConfig):
    """Configuration for the Time Series Transformer model.

    NOTE(review): the class name is presumably ``TimeSeriesTransformerConfig`` but is
    left as-is to avoid breaking importers. Internals are restored from a mangled
    original: ``model_type``/``attribute_map`` were assigned to garbled names, every
    ``__init__`` parameter shared one name (a SyntaxError), no ``self.*`` attribute
    was ever set, and the ``_number_of_features`` property (read by ``__init__``)
    had its name clobbered. The base ``lowerCAmelCase`` was undefined; the file
    imports ``PretrainedConfig`` for this purpose.
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],  # noqa: B006 -- kept for upstream signature compatibility
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Rule-of-thumb embedding size per categorical feature, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 496
| 0
|
"""simple docstring"""
# Public API of the features subpackage.
# NOTE(review): the original bound this list to the mangled name
# ``__UpperCAmelCase`` instead of ``__all__``, breaking wildcard imports.
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 642
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCAmelCase = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Turn (sequences, labels, template) into NLI premise/hypothesis pairs.

    NOTE(review): reconstructed from a mangled original — the class name is the
    spelling used as the default argument of the pipeline below; ``_parse_labels``
    is the method name the pipeline's ``_sanitize_parameters`` calls; the base
    ``_UpperCamelCase`` was undefined (the file imports ``ArgumentHandler``).
    """

    def _parse_labels(self, labels):
        # Accept either a list of labels or one comma-separated string.
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        # A template with no "{}" placeholder formats to itself — reject it early.
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot sequence classification pipeline.

    NOTE(review): reconstructed from a mangled original in which every method was
    named ``UpperCAmelCase`` (each shadowing the previous), the ``entailment_id``
    property name was clobbered although ``__init__`` and ``postprocess`` read it,
    and the config attribute ``label2id`` appeared as ``labelaid``.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        """Index of the NLI 'entailment' label, or -1 when it cannot be found."""
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        """Tokenize premise/hypothesis pairs, degrading gracefully for odd tokenizers."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            # Legacy positional form: pipeline(sequences, labels)
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 642
| 1
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fixed-step fourth-order Runge-Kutta integrator for y' = f(x, y).

    Args:
        f: right-hand side, callable(x, y) -> float.
        y0: initial value y(x0).
        x0: start of the integration interval.
        h: step size.
        x_end: end of the integration interval.

    Returns:
        np.ndarray of length n+1 with the y values at each step
        (n = ceil((x_end - x0) / h)).

    NOTE(review): the original declared all five parameters with the same mangled
    name (a SyntaxError) and dropped the ``y[k + 1] = ...`` store; names and the
    store are restored from the body's reads. Parameter order reconstructed as
    (f, y0, x0, h, x_end) — TODO confirm against callers.
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


# Backward-compatible alias for the original (mangled) name.
lowerCamelCase_ = runge_kutta
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 644
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids for the four Stable Diffusion v1 variants being compared.
# NOTE(review): the original assigned all four strings to the same mangled name
# ``snake_case_`` (each clobbering the previous); distinct names restored, with
# the old name kept as an alias to its final (v1-4) value for compatibility.
pipe_1_model_id = "CompVis/stable-diffusion-v1-1"
pipe_2_model_id = "CompVis/stable-diffusion-v1-2"
pipe_3_model_id = "CompVis/stable-diffusion-v1-3"
pipe_4_model_id = "CompVis/stable-diffusion-v1-4"
snake_case_ = pipe_4_model_id
class __a (lowerCamelCase ):
    # NOTE(review): this block is corrupted by name mangling and is left
    # byte-identical pending reconstruction against the upstream
    # stable-diffusion comparison community pipeline:
    #   - all eight parameters share the name ``__magic_name__`` (a SyntaxError);
    #   - ``super()._init_()`` is presumably ``super().__init__()``;
    #   - the four locals assigned to ``UpperCAmelCase_`` were presumably
    #     ``self.pipe1``..``self.pipe4`` (cf. ``register_modules`` below, and the
    #     three ``from_pretrained`` calls that should receive the three module-level
    #     checkpoint ids);
    #   - ``register_modules`` repeats the keyword ``pipelinea`` four times.
    def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
        """Load three earlier SD v1 pipelines and assemble a fourth from the passed
        components, then register all four as modules (intent inferred from the
        visible calls -- TODO confirm against the upstream community pipeline)."""
        super()._init_()
        UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Tuple = StableDiffusionPipeline(
            vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
    # NOTE(review): mangled block, left byte-identical: the parameters all share
    # the name ``__magic_name__`` (a SyntaxError) and ``self.pipea`` is never
    # assigned in ``__init__`` — presumably this wrapper forwarded a text-to-image
    # call to the first sub-pipeline (``self.pipe1``, SD v1-1). TODO reconstruct
    # against the upstream community pipeline.
    @torch.no_grad()
    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
        """Forward all generation arguments unchanged to the first sub-pipeline."""
        return self.pipea(
            prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
    # NOTE(review): mangled block, left byte-identical: duplicate
    # ``__magic_name__`` parameters (SyntaxError) and an unassigned
    # ``self.pipea`` attribute — presumably the wrapper for the second
    # sub-pipeline (``self.pipe2``, SD v1-2). TODO reconstruct upstream.
    @torch.no_grad()
    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
        """Forward all generation arguments unchanged to the second sub-pipeline."""
        return self.pipea(
            prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__magic_name__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ : int = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ : str = self.textaimg_sda_a(
prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 644
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
# Module-level logger (identifier obfuscated; nothing in this chunk reads it).
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
# NOTE(review): conventionally named PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP,
# and the rebinding of the same obfuscated name shadows the logger above —
# confirm external references before renaming either.
lowerCAmelCase_ : Optional[Any] = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig):
    """Configuration for a Perceiver model.

    Holds the latent-array and attention-layout hyper-parameters plus the
    modality-specific attributes (MLM, image classification, optical flow,
    multimodal autoencoding). Parameter names are restored from the
    attribute assignments in the body — the original signature repeated one
    obfuscated name, which is a SyntaxError.
    """

    # Key used by PretrainedConfig when (de)serializing configs.
    model_type = '''perceiver'''

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],  # mutable default mirrors upstream; read-only here
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],  # mutable default mirrors upstream
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __SCREAMING_SNAKE_CASE (OnnxConfig):
    """ONNX export configuration for Perceiver models.

    NOTE(review): this class reuses the same obfuscated name as the config
    class above (shadowing it); upstream this is PerceiverOnnxConfig.
    Method names are restored to the OnnxConfig API the body clearly
    implements — the original defined all three under one name.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout; Perceiver feeds every modality through a single "inputs" tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating ONNX outputs against PyTorch.
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy model inputs for ONNX export from a tokenizer or an image preprocessor."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects the token ids under the key "inputs".
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 692
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all start indices at which ``pattern`` occurs in ``s``.

    Naive O(len(s) * len(pattern)) scan. The original had duplicate
    parameter names (a SyntaxError) and collapsed every local into one
    name; the function is renamed to match its caller in the __main__
    block below.
    """
    pat_len = len(pattern)
    positions = []
    # Slide the pattern over every viable alignment; an empty/short text
    # yields an empty range and therefore an empty result.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            positions.append(i)
    return positions
if __name__ == "__main__":
    # Self-check and demo run when the module is executed directly.
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 67
| 0
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Shared module logger — the config classes below emit warnings/info through
# the name `logger`, so the obfuscated binding is restored to that name.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
# NOTE(review): upstream calls this OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP.
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder.

    NOTE(review): the obfuscated class name collided with the vision and
    composite config classes below; the name is restored from the
    references inside the composite config's __init__. Parameter names are
    restored from the attribute assignments in the body (the original
    repeated one obfuscated name — a SyntaxError).
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite owlvit config when needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder.

    NOTE(review): class name restored from the reference inside the
    composite config's __init__; parameter names restored from the
    attribute assignments in the body (the original repeated one
    obfuscated name — a SyntaxError).
    """

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite owlvit config when needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''') == "owlvit":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    """Composite OWL-ViT configuration bundling text and vision sub-configs.

    NOTE(review): class, attribute, and method names restored from the
    names the bodies themselves reference (``cls.model_type``,
    ``self.text_config.to_dict()``, ``OwlViTTextConfig`` /
    ``OwlViTVisionConfig`` constructors); the original defined every method
    under the single name ``a``, so only the last survived.
    """

    model_type = "owlvit"
    # Composite configs are serialized from their sub-configs.
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''')
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a composite config from a checkpoint path or hub id."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Build a composite config from already-materialized sub-config dicts."""
        config_dict = {}
        config_dict['''text_config'''] = text_config
        config_dict['''vision_config'''] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OWL-ViT.

    NOTE(review): method names restored to the OnnxConfig API the bodies
    implement — the original defined all five members under the single
    name ``a``.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis layout for the exported graph's inputs.
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis layout for the exported graph's outputs.
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating ONNX outputs against PyTorch.
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Combine tokenizer and image-processor dummy inputs into one feed dict."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 705
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure: module name -> public symbols it exports. The
# obfuscated `_lowercase` bindings are restored to `_import_structure`,
# which is the name the _LazyModule call at the bottom already references.
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

# Register the torch-backed modeling symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 397
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Shared module logger; the tokenizer's save_vocabulary error paths use it.
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory. These
# constant names are restored from the class-body references below
# (VOCAB_FILES_NAMES etc.) — the original rebound one obfuscated name
# four times, shadowing each value.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
    },
    'merges_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
    },
}

# Maximum input lengths (in tokens) for the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1024,
    'facebook/bart-large': 1024,
    'facebook/bart-large-mnli': 1024,
    'facebook/bart-large-cnn': 1024,
    'facebook/bart-large-xsum': 1024,
    'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """Return a byte -> printable-unicode-character mapping for byte-level BPE.

    Printable latin-1 bytes map to themselves; the remaining bytes map to
    code points starting at 256 so every byte has a visible, non-space
    representation. Renamed from the obfuscated identifier because the
    tokenizer class below calls ``bytes_to_unicode()``.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Unprintable byte: assign the next code point above 255.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a str or tuple of symbols).

    Renamed from the obfuscated identifier because the tokenizer's ``bpe``
    method below calls ``get_pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """Byte-level BPE tokenizer (GPT-2 style) for BART.

    Reconstructed: the original bound every attribute to a throwaway
    local (``self.encoder`` etc. were never set), named every method
    ``A`` (so ``self.bpe`` was undefined), and repeated one obfuscated
    parameter name in several signatures (SyntaxErrors). Names are
    restored from the canonical byte-level BPE tokenizer contract the
    bodies implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        # Wrap plain-string special tokens so stripping behavior is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to one pre-tokenized word; returns space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split text with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocab id (falls back to the unk token's id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocab id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and reverse the byte-level encoding back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order for faithful round-tripping.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """1 marks a special token, 0 a sequence token, mirroring build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BART does not use token type ids: always return zeros of the full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPE-encodes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 473
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
# Make torch/cuDNN ops deterministic so these model tests are reproducible.
enable_full_determinism()
class UpperCAmelCase__(ModelTesterMixin, unittest.TestCase):
    """Unit tests for a tiny PriorTransformer via the common model-tester mixin.

    Restored from the obfuscated original: the base class (`__lowerCamelCase` was
    undefined; the mixin is imported above), the class attributes the mixin reads,
    and method/local names that the bodies already referenced.
    """

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random inputs for a tiny prior model (batch=4, dim=8, 7 embeddings)."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic (seeded) random inputs with the same shapes as dummy_input."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        """Tiny model config plus matching inputs, used by the mixin's common tests."""
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class UpperCAmelCase__(unittest.TestCase):
    """Slow integration tests against the real Kandinsky 2.1 prior checkpoint."""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """Deterministic random inputs sized for the Kandinsky prior model."""
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 473
| 1
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase_(PretrainedConfig):
    """Composite configuration for a vision-encoder / text-decoder model pair.

    Holds one `AutoConfig` for the encoder and one for the decoder, built from
    the `encoder`/`decoder` dicts passed as keyword arguments.
    (Restored: the base class `_a` was undefined; attribute/local names were
    obfuscated although later code referenced the real names.)
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"""A configuraton of type {self.model_type} cannot be instantiated because """
                f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""")

        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config, marking the decoder for cross-attention."""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder half (pixel_values -> last_hidden_state).

    Renamed from the obfuscated `UpperCAmelCase_`: this exact class name is already
    referenced later in the file; the three properties all shared one obfuscated
    name and would have shadowed each other.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        # Loose tolerance for float32 export validation.
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder half.

    Renamed from the obfuscated `UpperCAmelCase_`: this exact class name is already
    referenced later in the file.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["encoder_hidden_states"] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        """Generate dummy decoder inputs plus a zero tensor for the encoder states."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input['input_ids'].shape
        # the decoder cross-attends over states of the encoder's hidden size
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop('input_ids')
        common_inputs["attention_mask"] = dummy_input.pop('attention_mask')
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class UpperCAmelCase_(OnnxConfig):
    """Top-level ONNX config that dispatches to encoder/decoder sub-configs.

    (Restored: the three members all shared one obfuscated name and would have
    shadowed each other; the encoder hidden size was computed but never stored
    on the decoder config.)
    """

    @property
    def inputs(self) -> None:
        # The composite model has no flat input spec; callers use the sub-configs.
        pass

    def get_encoder_config(self, encoder_config) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default") -> OnnxConfig:
        # The decoder needs the encoder's hidden size to shape its dummy cross-attention input.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def bamb(x):
    """Convert a byte count to whole mebibytes (floor).

    Renamed from the obfuscated `a_`: every call site below already uses `bamb`.
    """
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that records CUDA memory used and peaked across a block.

    After exit, exposes `begin`, `end`, `peak` (raw bytes) and `used`, `peaked`
    (MB deltas). Renamed from the obfuscated `UpperCAmelCase_`: the training loop
    below instantiates `TorchTracemalloc()` and reads these exact attributes.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    """Build MRPC train/eval dataloaders with a tokenizing collate function.

    Renamed from the obfuscated `a_`: the training function below calls
    `get_dataloaders(...)`, and the body referenced `datasets`/`tokenized_datasets`
    that were never assigned.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train MRPC sequence classification while tracking peak GPU memory per epoch.

    Renamed from the obfuscated `a_` (the `__main__` block calls
    `training_function(config, args)`); local names restored to the identifiers
    the body already referenced.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DummyOptim when DeepSpeed supplies its own optimizer config
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI args and launch the peak-memory-tracking training run.

    Renamed from the obfuscated `a_`: the `__main__` guard calls `main()`.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 645
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase(PretrainedConfig):
    """Configuration for a YOLOS object-detection model (ViT backbone + detection tokens).

    Restored from the obfuscated original: every parameter was named
    `_UpperCamelCase` (duplicate-argument SyntaxError), the base class
    `_snake_case` was undefined, and the `self.*` attributes were never assigned.
    Defaults follow the upstream YOLOS configuration.
    """

    model_type = '''yolos'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCamelCase(OnnxConfig):
    """ONNX export settings for YOLOS (pixel_values input, opset 12).

    Restored: the base `_snake_case` was undefined and the three properties all
    shared one obfuscated name, shadowing each other.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 406
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes strictly below `max_number` (sieve of Eratosthenes).

    Renamed from the obfuscated `lowercase__`: `solution` below already calls
    `calculate_prime_numbers`.
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark multiples of i starting at i*i (smaller ones already marked)
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below `max_number` with exactly two prime factors
    (Project Euler 187), via a two-pointer scan over the primes below
    max_number // 2.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # shrink right until primes[left] * primes[right] fits below the bound
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(F'{solution() = }')
| 406
| 1
|
"""simple docstring"""
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors.

    Renamed from the obfuscated `__A`: the `__main__` block already calls
    `perfect(...)`; the input variable is renamed to match its use in the
    f-string below.
    """
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 101
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard UK coins (Project Euler 31).

    Classic coin-change DP. Renamed from the obfuscated `__A` (the assert below
    calls `solution`); the inner range must start at `coin`, and the parameter
    is `pence` — the original body referenced `pence`/`coins` that were never
    bound.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
| 101
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for return_tensors; read as FRAMEWORK by the tests below
# (the obfuscated original assigned `_UpperCAmelCase`, leaving FRAMEWORK undefined).
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class a ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = ByTaTokenizer
UpperCamelCase : Tuple = False
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_: int =ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase__ ( self : str , **lowerCAmelCase : Dict ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : Any=False , lowerCAmelCase : List[str]=20 , lowerCAmelCase : Union[str, Any]=5 ) -> Tuple[str, list]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for i in range(len(lowercase_ ) ):
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_: int =list(filter(lambda lowerCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowercase_ ) )
SCREAMING_SNAKE_CASE_: List[str] =list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase_ ) , lowercase_ ) )
if max_length is not None and len(lowercase_ ) > max_length:
SCREAMING_SNAKE_CASE_: List[str] =toks[:max_length]
if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0:
while len(lowercase_ ) < min_length:
SCREAMING_SNAKE_CASE_: Optional[int] =toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_: List[Any] =[t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ )
if " " not in output_txt and len(lowercase_ ) > 1:
SCREAMING_SNAKE_CASE_: Optional[Any] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_: Dict =' ' + output_txt
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
return output_txt, output_ids
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
SCREAMING_SNAKE_CASE_: str =tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: Any ='Unicode €.'
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer(lowercase_ )
SCREAMING_SNAKE_CASE_: str =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , lowercase_ )
# decoding
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , """Unicode €.</s>""" )
SCREAMING_SNAKE_CASE_: Any =tokenizer("""e è é ê ë""" )
SCREAMING_SNAKE_CASE_: int =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , lowercase_ )
# decoding
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.decode(lowercase_ )
self.assertEqual(lowercase_ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: Union[str, Any] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_: Any =tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_: List[Any] =list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowercase_ )
self.assertIn("""attention_mask""" , lowercase_ )
self.assertNotIn("""decoder_input_ids""" , lowercase_ )
self.assertNotIn("""decoder_attention_mask""" , lowercase_ )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: int =[
'Summary of the text.',
'Another summary.',
]
SCREAMING_SNAKE_CASE_: Any =tokenizer(
text_target=lowercase_ , max_length=32 , padding="""max_length""" , truncation=lowercase_ , return_tensors=lowercase_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_: str =['A long paragraph for summarization. </s>']
SCREAMING_SNAKE_CASE_: Tuple =['Summary of the text. </s>']
# fmt: off
SCREAMING_SNAKE_CASE_: Any =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_: Optional[int] =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_: int =tokenizer(lowercase_ , text_target=lowercase_ )
self.assertEqual(lowercase_ , batch["""input_ids"""][0] )
self.assertEqual(lowercase_ , batch["""labels"""][0] )
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Optional[Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Dict =' He is very happy, UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_: Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.__class__.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE_: Tuple =after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
shutil.rmtree(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_: Optional[int] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Dict =' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(["""bim""", """bambam"""] )
SCREAMING_SNAKE_CASE_: int =tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
tokenizer.save_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE_: int =tokenizer.__class__.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =after_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.__class__.from_pretrained(lowercase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase_ )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE_: Dict =json.load(lowercase_ )
with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE_: int =json.load(lowercase_ )
SCREAMING_SNAKE_CASE_: List[str] =[f'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_: List[str] =added_tokens_extra_ids + [
'an_additional_special_token'
]
SCREAMING_SNAKE_CASE_: Any =added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase_ , lowercase_ )
with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase_ , lowercase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_: str =tokenizer_class.from_pretrained(
lowercase_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_: Tuple =added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowercase_ )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase__ ( self : int ) -> Dict:
    """Round-trip every available tokenizer through save/load and check that
    decoding the single id 255 produces the empty string.

    Fixes the machine-mangled locals of the original: every assignment went to
    ``SCREAMING_SNAKE_CASE_`` while the code read the undefined names
    ``tokenizer_list``, ``lowercase_`` and ``tokenizer`` (NameError).
    """
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            tokenizer = tokenizer_class.from_pretrained(tmp_dir)
            # id 255 must decode to nothing (no stray character leaks out)
            self.assertTrue(tokenizer.decode([255]) == """""")
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
    """Deliberate no-op.

    NOTE(review): presumably overrides an inherited common-tokenizer test that
    does not apply to this byte-level tokenizer — confirm against the mixin
    base class. Also note that every method here carries the same mangled name
    ``lowerCamelCase__``, so only the last definition survives in the class.
    """
    pass
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
    """Deliberate no-op.

    NOTE(review): presumably skips an inherited common-tokenizer test that is
    not meaningful for a byte-level tokenizer — confirm against the base class.
    """
    pass
def lowerCamelCase__ ( self : str ) -> Optional[int]:
    """Deliberate no-op.

    NOTE(review): presumably skips an inherited common-tokenizer test that is
    not meaningful for a byte-level tokenizer — confirm against the base class.
    """
    pass
def lowerCamelCase__ ( self : Any ) -> Tuple:
    """Deliberate no-op.

    NOTE(review): presumably skips an inherited common-tokenizer test that is
    not meaningful for a byte-level tokenizer — confirm against the base class.
    """
    pass
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
    """``convert_tokens_to_string`` must return a plain ``str`` for byte-level
    tokens.

    Restores the mangled locals (``tokenizers``, ``tokens``, ``string``) that
    the original referenced but never bound. The ``fast=True,
    do_lower_case=True`` arguments are restored from the upstream ByT5 test —
    TODO confirm against that file.
    """
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}'''):
            tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
def lowerCamelCase__ ( self : int ) -> int:
    """Each special-token attribute must be settable through its ``*_id`` twin,
    and ``additional_special_tokens_ids`` must stay in sync with
    ``additional_special_tokens``.

    Restores the mangled locals of the original, in which every assignment
    target was ``SCREAMING_SNAKE_CASE_`` and the setters/getters referenced
    the undefined name ``lowercase_``.
    """
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}'''):
            attributes_list = [
                'bos_token',
                'eos_token',
                'unk_token',
                'sep_token',
                'pad_token',
                'cls_token',
                'mask_token',
            ]
            token_id_to_test_setters = 0
            token_to_test_setters = tokenizer.convert_ids_to_tokens(
                token_id_to_test_setters, skip_special_tokens=False)
            for attr in attributes_list:
                # Clearing the id must clear the paired token attribute too.
                setattr(tokenizer, attr + """_id""", None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + """_id"""), None)
                # Setting the id must update the paired token attribute.
                setattr(tokenizer, attr + """_id""", token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + """_id"""), token_id_to_test_setters)
            # The additional-special-tokens list and its id list must mirror
            # each other through the setter as well.
            setattr(tokenizer, """additional_special_tokens_ids""", [])
            self.assertListEqual(getattr(tokenizer, """additional_special_tokens"""), [])
            self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids"""), [])
            setattr(tokenizer, """additional_special_tokens_ids""", [token_id_to_test_setters])
            self.assertListEqual(getattr(tokenizer, """additional_special_tokens"""), [token_to_test_setters])
            self.assertListEqual(getattr(tokenizer, """additional_special_tokens_ids"""), [token_id_to_test_setters])
| 409
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCamelCase_ ):
    # End-to-end smoke test: fine-tune a tiny BERT2BERT encoder-decoder on 1%
    # of CNN/DailyMail with Seq2SeqTrainer and an exact-match accuracy metric.
    # NOTE(review): local names were machine-mangled to `_lowerCamelCase` /
    # `lowercase_`; references such as `batch`, `inputs`, `outputs`, `pred`,
    # `bertabert`, `tokenizer`, `train_dataset`, `val_dataset`, `batch_size`,
    # `pred_str`, `label_str`, `accuracy` and `trainer` are undefined as
    # written — the original bindings must be restored before this can run.
    @slow
    @require_torch
    def lowerCamelCase ( self : Any ) -> Any:
        """Train for one tiny epoch and ensure the Seq2SeqTrainer loop runs."""
        # tiny encoder-decoder pair plus a standard BERT tokenizer
        _lowerCamelCase : int =EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        _lowerCamelCase : Dict =BertTokenizer.from_pretrained('bert-base-uncased' )
        _lowerCamelCase : Union[str, Any] =bertabert.config.encoder.vocab_size
        _lowerCamelCase : Dict =tokenizer.sep_token_id
        _lowerCamelCase : List[Any] =tokenizer.cls_token_id
        _lowerCamelCase : Optional[int] =128
        # 1% splits, further cut to 32/16 rows, keep the test fast
        _lowerCamelCase : int =datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        _lowerCamelCase : int =datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        _lowerCamelCase : Optional[int] =train_dataset.select(range(32 ) )
        _lowerCamelCase : Optional[int] =val_dataset.select(range(16 ) )
        _lowerCamelCase : Optional[int] =4
        def _map_to_encoder_decoder_inputs(lowercase_ : Tuple ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            _lowerCamelCase : Optional[int] =tokenizer(batch['article'] , padding='max_length' , truncation=lowercase_ , max_length=512 )
            _lowerCamelCase : List[str] =tokenizer(batch['highlights'] , padding='max_length' , truncation=lowercase_ , max_length=128 )
            _lowerCamelCase : List[str] =inputs.input_ids
            _lowerCamelCase : Any =inputs.attention_mask
            _lowerCamelCase : List[str] =outputs.input_ids
            _lowerCamelCase : int =outputs.input_ids.copy()
            # mask padding positions with -100 so they are ignored by the loss
            _lowerCamelCase : List[str] =[
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            _lowerCamelCase : Dict =outputs.attention_mask
            assert all(len(lowercase_ ) == 512 for x in inputs.input_ids )
            assert all(len(lowercase_ ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(lowercase_ : str ):
            # exact-match accuracy between decoded predictions and labels
            _lowerCamelCase : List[Any] =pred.label_ids
            _lowerCamelCase : List[Any] =pred.predictions
            # all unnecessary tokens are removed
            _lowerCamelCase : str =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            _lowerCamelCase : Tuple =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            _lowerCamelCase : List[str] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ )
            return {"accuracy": accuracy}
        # map train dataset
        _lowerCamelCase : Tuple =train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        _lowerCamelCase : Tuple =val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        _lowerCamelCase : Dict =self.get_auto_remove_tmp_dir()
        _lowerCamelCase : Optional[int] =SeqaSeqTrainingArguments(
            output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy='steps' , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        _lowerCamelCase : Optional[Any] =SeqaSeqTrainer(
            model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , )
        # start training
        trainer.train()
| 464
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast (dummy-weights, CPU) checks for ``KandinskyImgaImgPipeline`` via
    the shared ``PipelineTesterMixin`` plumbing."""
    # NOTE(review): all helper properties/methods below were machine-mangled
    # to the single name `SCREAMING_SNAKE_CASE_`, so each def shadows the
    # previous one, and references such as `self.time_input_dim`,
    # `self.cross_attention_dim`, `self.text_embedder_hidden_size`,
    # `self.block_out_channels_a`, `self.dummy_movq_kwargs`,
    # `self.dummy_text_encoder`, `tokenizer`, `text_encoder`, `model`,
    # `image`, `seed`, `init_image`, `generator`, `pipe`, `output`,
    # `image_slice` are undefined as written. One def even repeats the
    # parameter name `_lowerCAmelCase` (a SyntaxError). The original helper
    # names must be restored before these tests can run.
    UpperCamelCase_ = KandinskyImgaImgPipeline
    # pipeline call-argument lists exercised by the mixin (note: the class
    # attribute name is also mangled, so the later assignments shadow the
    # earlier ones)
    UpperCamelCase_ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
    UpperCamelCase_ = [
        '''prompt''',
        '''negative_prompt''',
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    UpperCamelCase_ = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''negative_prompt''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    UpperCamelCase_ = False
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
        # embedder hidden size for the dummy models
        return 32
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
        return 32
    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
        return self.time_input_dim
    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
        return self.time_input_dim * 4
    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
        return 100
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
        # tiny multilingual tokenizer for the dummy text encoder
        _lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
        # deterministic dummy multilingual CLIP text encoder
        torch.manual_seed(0 )
        _lowerCAmelCase = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        _lowerCAmelCase = MultilingualCLIP(_lowerCAmelCase )
        _lowerCAmelCase = text_encoder.eval()
        return text_encoder
    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
        # tiny UNet with the text_image conditioning used by Kandinsky
        torch.manual_seed(0 )
        _lowerCAmelCase = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        _lowerCAmelCase = UNetaDConditionModel(**_lowerCAmelCase )
        return model
    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> int:
        # constructor kwargs for the tiny MoVQ (VQModel) image decoder
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
        torch.manual_seed(0 )
        _lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
        return model
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
        """Assemble the full dummy pipeline component dict."""
        _lowerCAmelCase = self.dummy_text_encoder
        _lowerCAmelCase = self.dummy_tokenizer
        _lowerCAmelCase = self.dummy_unet
        _lowerCAmelCase = self.dummy_movq
        _lowerCAmelCase = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.00085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        _lowerCAmelCase = DDIMScheduler(**_lowerCAmelCase )
        _lowerCAmelCase = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=0 ) -> List[str]:
        """Build a deterministic kwargs dict for one pipeline call."""
        _lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
        _lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCAmelCase )
        # create init_image
        _lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
        _lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert('RGB' ).resize((256, 256) )
        if str(_lowerCAmelCase ).startswith('mps' ):
            _lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
        else:
            _lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
        _lowerCAmelCase = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
        """Run the dummy pipeline on CPU and compare a 3x3 corner slice of the
        output against a hard-coded reference."""
        _lowerCAmelCase = 'cpu'
        _lowerCAmelCase = self.get_dummy_components()
        _lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
        _lowerCAmelCase = pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        _lowerCAmelCase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
        _lowerCAmelCase = output.images
        # same call but with return_dict disabled must yield identical pixels
        _lowerCAmelCase = pipe(
            **self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
        _lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _lowerCAmelCase = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __lowercase( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky prior + img2img against a
    reference output image."""
    # NOTE(review): both methods were mangled to `SCREAMING_SNAKE_CASE_`, so
    # the first (which calls `super().tearDown()` and was presumably named
    # `tearDown`) will never be invoked by unittest under this name, and
    # locals such as `prompt`, `init_image`, `pipe_prior`, `pipeline`,
    # `generator`, `image_embeds`, `zero_image_embeds`, `output`, `image`,
    # `expected_image` are undefined as written.
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
        """Generate a frog image from a cat photo and compare to a stored
        reference via mean pixel difference."""
        _lowerCAmelCase = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy' )
        _lowerCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        _lowerCAmelCase = 'A red cartoon frog, 4k'
        _lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(_lowerCAmelCase )
        _lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
        _lowerCAmelCase = pipeline.to(_lowerCAmelCase )
        pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
        _lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
        _lowerCAmelCase , _lowerCAmelCase = pipe_prior(
            _lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        _lowerCAmelCase = pipeline(
            _lowerCAmelCase , image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
        _lowerCAmelCase = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 702
|
from __future__ import annotations

# Coulomb's constant k_e (units: N * m^2 * C^-2)
COULOMBS_CONSTANT = 8.988e9


def _a(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for the single quantity passed as 0.

    Exactly one of ``force``, ``charge1``, ``charge2`` or ``distance`` must
    be 0; that quantity is computed from the other three using
    ``F = k_e * |q1 * q2| / d^2`` and returned as a one-entry dict keyed by
    its name.

    Fixes the mangled original, whose signature repeated one parameter name
    four times (a SyntaxError) and which referenced the undefined global
    ``COULOMBS_CONSTANT`` (the constant had been renamed away).

    Raises:
        ValueError: if not exactly one argument is 0, or if ``distance`` is
            negative.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


# Public, readable alias for the mangled function name above.
coulombs_law = _a

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 585
| 0
|
'''Numerical integration with the composite trapezoidal rule.'''
from __future__ import annotations

from collections.abc import Callable


def __snake_case(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between ``fnc``'s curve and the x axis on
    ``[x_start, x_end]`` using ``steps`` trapezoidal segments.

    Fixes the mangled original, in which every assignment target had been
    rewritten to the same placeholder name while the loop body read the
    never-bound locals ``xa`` and ``fxa`` (NameError on first call).
    """
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # right edge of the current segment
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        # treat the segment as a trapezoid; abs() makes area below the
        # x axis count positively
        area += abs(fxb + fxa) * (xb - xa) / 2
        # advance to the next segment
        xa = xb
        fxa = fxb
    return area


# Alias matching the name called by the __main__ demo further down the file.
trapezoidal_area = __snake_case
if __name__ == "__main__":
    # Demo: integrate f(x) = x^3 + x^2 over [-5, 5] with increasing step
    # counts. Fixes the mangled original, which shadowed the integrator,
    # assigned the loop counter to a throwaway name, and called the undefined
    # names `trapezoidal_area`, `f` and `i`.
    def f(x):
        """Demo integrand."""
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100_000:
        print(F"""with {i} steps: {__snake_case(f, -5, 5, i)}""")
        i *= 10
| 51
|
# Adjacency list of the example DAG and the list of all of its vertices.
# (The mangled original bound BOTH globals to the same name, so the dict was
# lost and the function's references to `edges`/`vertices` were undefined.)
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def UpperCAmelCase_(start, visited, sort):
    """Depth-first post-order sort of the module-level graph.

    Appends every vertex reachable from ``start`` — and then any vertex still
    unvisited — to ``sort`` after all of its neighbors, and returns ``sort``.
    ``visited`` and ``sort`` are mutated in place.

    Fixes the mangled original, whose signature repeated one parameter name
    three times (a SyntaxError) and whose body read the never-bound locals
    ``current`` and ``neighbors``.
    """
    current = start
    # mark the current vertex as visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit it first so it precedes `current`
        if neighbor not in visited:
            sort = UpperCAmelCase_(neighbor, visited, sort)
    # all neighbors visited: add current to sort
    sort.append(current)
    # if all vertices haven't been visited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = UpperCAmelCase_(vertice, visited, sort)
    return sort


if __name__ == "__main__":
    # The original called the undefined name `topological_sort`.
    sort = UpperCAmelCase_("a", [], [])
    print(sort)
| 493
| 0
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a__ ( unittest.TestCase ):
    """Model-tester helper producing BERT configs and dummy inputs for the
    Flax BERT test-suite below."""
    # NOTE(review): the constructor repeats the parameter name `UpperCAmelCase`
    # for every argument (a SyntaxError), so the attribute assignments below
    # read names (`parent`, `batch_size`, ...) that are never bound; the three
    # factory methods also all share the mangled name `__SCREAMING_SNAKE_CASE`,
    # so later defs shadow earlier ones. The original identifiers must be
    # restored before this tester is usable.
    def __init__( self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , ) -> int:
        # store the tiny-model hyperparameters on the tester instance
        __a = parent
        __a = batch_size
        __a = seq_length
        __a = is_training
        __a = use_attention_mask
        __a = use_token_type_ids
        __a = use_labels
        __a = vocab_size
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = intermediate_size
        __a = hidden_act
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = type_vocab_size
        __a = type_sequence_label_size
        __a = initializer_range
        __a = num_choices
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Build a config plus random input ids / masks for one forward pass."""
        __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a = None
        if self.use_attention_mask:
            __a = random_attention_mask([self.batch_size, self.seq_length] )
        __a = None
        if self.use_token_type_ids:
            __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Repackage prepare_config_and_inputs() as (config, inputs_dict)."""
        __a = self.prepare_config_and_inputs()
        __a , __a , __a , __a = config_and_inputs
        __a = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Variant for decoder tests: adds encoder hidden states and mask."""
        __a = self.prepare_config_and_inputs()
        __a , __a , __a , __a = config_and_inputs
        __a = True
        __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a__ ( __snake_case , unittest.TestCase ):
    """Flax BERT model test-suite wiring the tester above to every Flax BERT
    head class."""
    # NOTE(review): `FlaxBertForQuestionAnswering` appears TWICE in the tuple
    # below — presumably one entry should be removed. The two class attributes
    # share the mangled name `A__` (the tuple shadows the flag), and both
    # methods share the mangled name `__SCREAMING_SNAKE_CASE`, so the second
    # shadows the first (originally, by convention, `setUp` and a slow
    # from_pretrained smoke test). The first method also discards its result
    # instead of storing the tester on `self`.
    A__ : Optional[Any] = True
    A__ : List[Any] = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        __a = FlaxBertModelTester(self )
    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        __a = FlaxBertModel.from_pretrained('bert-base-cased' )
        __a = model(np.ones((1, 1) ) )
        self.assertIsNotNone(UpperCAmelCase )
| 246
|
def lowerCAmelCase( __lowerCamelCase ):
    """Sort a list in ascending order with pancake sort and return it.

    Repeatedly brings the maximum of the still-unsorted prefix to the front
    with one flip, then flips the whole prefix so the maximum lands at its
    final position. The input list itself is not mutated (flips rebuild the
    list), and the sorted list is returned.

    Fixes the mangled original, whose assignments all targeted the throwaway
    name ``__a`` while the loop read the never-bound locals ``cur``, ``mi``
    and ``arr``.
    """
    arr = __lowerCamelCase
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip prefix 0..mi so the maximum comes first
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the whole unsorted region so the maximum lands at cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it pancake-sorted.
    # Fixes the mangled original, which assigned both values to the same
    # throwaway name and then called the undefined names `user_input`,
    # `unsorted` and `pancake_sort`.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(lowerCAmelCase(unsorted))
| 246
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( UpperCAmelCase__ ):
    '''Test-suite for ``DPMSolverSinglestepScheduler``: config round-trips,
    solver/prediction-type sweeps, and full denoising loops checked against
    hard-coded reference means.'''
    # NOTE(review): identifiers in this class were machine-mangled — every
    # method is named `_a` (each def shadows the previous one), both class
    # attributes are `SCREAMING_SNAKE_CASE`, two defs repeat the parameter
    # name `lowerCamelCase__` (a SyntaxError), and locals such as `config`,
    # `kwargs`, `sample`, `residual`, `scheduler`, `new_scheduler`,
    # `time_step`, `output`, `new_output`, `model`, `result_mean` are read
    # but never bound. The original identifiers must be restored before any
    # of these tests can run.
    SCREAMING_SNAKE_CASE: Tuple = (DPMSolverSinglestepScheduler,)
    SCREAMING_SNAKE_CASE: List[str] = (('num_inference_steps', 25),)
    def _a ( self , **lowerCamelCase__ ):
        """Default scheduler config, overridable through kwargs."""
        lowerCAmelCase_: Tuple = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf" ),
            "variance_type": None,
        }
        config.update(**lowerCamelCase__ )
        return config
    def _a ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
        """Save a configured scheduler, reload it, and check both step
        identically over solver_order+1 timesteps."""
        lowerCAmelCase_: Optional[int] = dict(self.forward_default_kwargs )
        lowerCAmelCase_: Optional[int] = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
        lowerCAmelCase_: Tuple = self.dummy_sample
        lowerCAmelCase_: Any = 0.1 * sample
        lowerCAmelCase_: Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase_: int = self.get_scheduler_config(**lowerCamelCase__ )
            lowerCAmelCase_: str = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals
            lowerCAmelCase_: Tuple = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                lowerCAmelCase_: List[Any] = scheduler_class.from_pretrained(lowerCamelCase__ )
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residuals
                lowerCAmelCase_: str = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase_ , lowerCAmelCase_: Union[str, Any] = sample, sample
            for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
                lowerCAmelCase_: Optional[int] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
                lowerCAmelCase_: List[Any] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def _a ( self ):
        # intentionally empty override (shadowed by later defs anyway)
        pass
    def _a ( self , lowerCamelCase__=0 , **lowerCamelCase__ ):
        """Same save/reload check, but comparing a single forward step."""
        lowerCAmelCase_: List[Any] = dict(self.forward_default_kwargs )
        lowerCAmelCase_: Dict = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
        lowerCAmelCase_: int = self.dummy_sample
        lowerCAmelCase_: List[Any] = 0.1 * sample
        lowerCAmelCase_: int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            lowerCAmelCase_: List[str] = self.get_scheduler_config()
            lowerCAmelCase_: List[Any] = scheduler_class(**lowerCamelCase__ )
            scheduler.set_timesteps(lowerCamelCase__ )
            # copy over dummy past residuals (must be after setting timesteps)
            lowerCAmelCase_: Any = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCamelCase__ )
                lowerCAmelCase_: int = scheduler_class.from_pretrained(lowerCamelCase__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCamelCase__ )
                # copy over dummy past residual (must be after setting timesteps)
                lowerCAmelCase_: str = dummy_past_residuals[: new_scheduler.config.solver_order]
            lowerCAmelCase_: List[str] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            lowerCAmelCase_: Optional[int] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def _a ( self , lowerCamelCase__=None , **lowerCamelCase__ ):
        """Run a complete 10-step denoising loop and return the final sample."""
        if scheduler is None:
            lowerCAmelCase_: str = self.scheduler_classes[0]
            lowerCAmelCase_: Optional[int] = self.get_scheduler_config(**lowerCamelCase__ )
            lowerCAmelCase_: Optional[int] = scheduler_class(**lowerCamelCase__ )
        lowerCAmelCase_: Union[str, Any] = self.scheduler_classes[0]
        lowerCAmelCase_: List[str] = self.get_scheduler_config(**lowerCamelCase__ )
        lowerCAmelCase_: List[Any] = scheduler_class(**lowerCamelCase__ )
        lowerCAmelCase_: List[Any] = 10
        lowerCAmelCase_: Dict = self.dummy_model()
        lowerCAmelCase_: List[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase_: Tuple = model(lowerCamelCase__ , lowerCamelCase__ )
            lowerCAmelCase_: Union[str, Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        return sample
    def _a ( self ):
        """Full loop starting at an uneven first timestep; check result mean."""
        lowerCAmelCase_: Any = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        lowerCAmelCase_: Dict = 50
        lowerCAmelCase_: Optional[int] = self.dummy_model()
        lowerCAmelCase_: List[str] = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCamelCase__ )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            lowerCAmelCase_: Optional[Any] = model(lowerCamelCase__ , lowerCamelCase__ )
            lowerCAmelCase_: Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        lowerCAmelCase_: Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
    def _a ( self ):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
    def _a ( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        lowerCAmelCase_: List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        lowerCAmelCase_: Optional[Any] = self.full_loop(scheduler=lowerCamelCase__ )
        lowerCAmelCase_: Dict = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
        # round-trip the config through the other multistep schedulers and back
        lowerCAmelCase_: int = DEISMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase_: Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase_: List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCAmelCase_: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCAmelCase_: Any = self.full_loop(scheduler=lowerCamelCase__ )
        lowerCAmelCase_: int = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
    def _a ( self ):
        """Sweep thresholding options across orders/solver/prediction types."""
        self.check_over_configs(thresholding=lowerCamelCase__ )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="dpmsolver++" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
    def _a ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCamelCase__ )
    def _a ( self ):
        """Sweep solver/order/prediction combinations and check for NaNs."""
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
                        lowerCAmelCase_: str = self.full_loop(
                            solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
                        assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
    def _a ( self ):
        self.check_over_configs(lower_order_final=lowerCamelCase__ )
        self.check_over_configs(lower_order_final=lowerCamelCase__ )
    def _a ( self ):
        self.check_over_configs(lambda_min_clipped=-float("inf" ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )
    def _a ( self ):
        self.check_over_configs(variance_type=lowerCamelCase__ )
        self.check_over_configs(variance_type="learned_range" )
    def _a ( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
    def _a ( self ):
        """Default full loop against its hard-coded reference mean."""
        lowerCAmelCase_: List[str] = self.full_loop()
        lowerCAmelCase_: List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
    def _a ( self ):
        lowerCAmelCase_: Any = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
        lowerCAmelCase_: Union[str, Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
    def _a ( self ):
        lowerCAmelCase_: Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
        lowerCAmelCase_: List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
    def _a ( self ):
        lowerCAmelCase_: Optional[Any] = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=lowerCamelCase__ )
        lowerCAmelCase_: Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
        assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
    def _a ( self ):
        """Full loop in fp16: final sample must stay float (not over/underflow).

        NOTE(review): the final assert checks ``torch.floataa`` — another
        mangled name (presumably ``torch.float16``); confirm before running.
        """
        lowerCAmelCase_: Tuple = self.scheduler_classes[0]
        lowerCAmelCase_: List[str] = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
        lowerCAmelCase_: Tuple = scheduler_class(**lowerCamelCase__ )
        lowerCAmelCase_: Any = 10
        lowerCAmelCase_: Union[str, Any] = self.dummy_model()
        lowerCAmelCase_: Optional[int] = self.dummy_sample_deter.half()
        scheduler.set_timesteps(lowerCamelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase_: Dict = model(lowerCamelCase__ , lowerCamelCase__ )
            lowerCAmelCase_: List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        assert sample.dtype == torch.floataa
| 613
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer with word-level tokenization backends
    (MeCab / Sudachi / Juman++) followed by WordPiece or SentencePiece."""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """Return (raw text, expected detokenized text) for round-trip checks."""
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        """A MeCab-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        """A Sudachi-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        """A Juman++-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer with character-level subword tokenization."""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """AutoTokenizer must resolve a BertJapanese checkpoint to BertJapaneseTokenizer."""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Loading a checkpoint with the wrong tokenizer class must emit a warning."""

    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )

        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )
| 613
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """Builds small VideoMAE configs/inputs and runs shape checks for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for VideoMAE (attention, hidden states, signatures, etc.)."""

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                # VideoMAEForPreTraining only attends over the visible (unmasked) patches
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    """Download the test video (as a .npy array) and return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against pretrained VideoMAE checkpoints."""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        # was `torch.tensor(torch.tensor([...]))` — the redundant wrapping is dropped
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 719
|
import csv
import tweepy
# Twitter API credentials
lowercase = """"""
lowercase = """"""
lowercase = """"""
lowercase = """"""
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = tweepy.OAuthHandler(UpperCamelCase__, UpperCamelCase__ )
auth.set_access_token(UpperCamelCase__, UpperCamelCase__ )
UpperCamelCase__ = tweepy.API(UpperCamelCase__ )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ = api.user_timeline(screen_name=UpperCamelCase__, count=200 )
# save most recent tweets
alltweets.extend(UpperCamelCase__ )
# save the id of the oldest tweet less one
UpperCamelCase__ = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(UpperCamelCase__ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ = api.user_timeline(
screen_name=UpperCamelCase__, count=200, max_id=UpperCamelCase__ )
# save most recent tweets
alltweets.extend(UpperCamelCase__ )
# update the id of the oldest tweet less one
UpperCamelCase__ = alltweets[-1].id - 1
print(F"""...{len(UpperCamelCase__ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""", '''w''' ) as f:
UpperCamelCase__ = csv.writer(UpperCamelCase__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(UpperCamelCase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 591
| 0
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """A well-formed two-column CSV file; returns its path as a string."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """A CSV with an extra trailing comma on the last row (tokenizing error)."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """A one-column CSV whose single row is the path of `image_file`."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a one-column CSV of class-label strings and return its path."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a one-column CSV of space-separated integer lists and return its path."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed CSV must raise while iterating the table generator, and the
    failing file must be reported in an ERROR log record.

    NOTE(review): restored fixture-parameter names (the obfuscated version
    named all three ``SCREAMING_SNAKE_CASE``, a SyntaxError) and the undefined
    local references.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(image_file, csv_file_with_image):
    """Reading a CSV with an ``Image`` feature yields the Arrow image type and
    decodes each row to a ``{"path", "bytes"}`` dict."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Feature instances are callable and return their pyarrow storage type.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """Reading a CSV with a ``ClassLabel`` feature maps label strings to their
    integer ids.

    NOTE(review): fixed ``.straint(`` — a mangled spelling of
    ``ClassLabel.str2int(label)`` — plus the colliding ``_a`` names.
    """
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Feature instances are callable and return their pyarrow storage type.
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A pandas ``converters`` entry can split a string cell into a list of
    ints, producing an Arrow list column.

    NOTE(review): restored the converter lambda — the obfuscated version bound
    the parameter as ``SCREAMING_SNAKE_CASE`` while the body read ``x``/``i``.
    """
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 43
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
A__ : Tuple = None
# NOTE(review): the obfuscated original bound every constant below to the same
# name ``A__`` (each assignment clobbering the previous one) while the
# tokenizer class further down reads ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``;
# canonical names restored.
logger = logging.get_logger(__name__)

# Artefact filenames expected inside an ALBERT checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Hosted sentencepiece model / fast-tokenizer JSON for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

# Maximum input length (in tokens) each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's meta-symbol marking the start of a word.
SPIECE_UNDERLINE = "▁"
class lowercase__ ( PreTrainedTokenizerFast ):
    """Fast (HF tokenizers-backed) ALBERT tokenizer.

    NOTE(review): the obfuscated original used ``snake_case__`` both as the
    (undefined) base class and as every ``__init__`` parameter name — a
    SyntaxError — and named all three methods ``UpperCAmelCase__`` so the
    later definitions clobbered the earlier ones. The canonical
    ``PreTrainedTokenizerFast`` override names were restored so the base
    class can dispatch to them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving a slow tokenizer needs the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]`` for pairs)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return segment ids: 0 for the first sequence (incl. its specials),
        1 for the optional second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into ``save_directory`` and return its path.

        Raises ValueError when the fast tokenizer was loaded without the
        original sentencepiece file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 153
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a : List[str] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Tuple = ["DeiTFeatureExtractor"]
__a : List[Any] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : int = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 199
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowercase :
    """Builds tiny Llama configs plus dummy inputs for the unit tests below.

    NOTE(review): the obfuscated original repeated the parameter name
    ``UpperCamelCase_`` for every ``__init__`` argument (a SyntaxError), named
    every method ``lowerCAmelCase_`` (later definitions clobbered earlier
    ones) and bound all locals to ``__A``; canonical names were restored from
    the assignment bodies.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, ids, token_type_ids, mask, and the three label tensors)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Canonical alias: the test classes below refer to this helper as LlamaModelTester.
LlamaModelTester = __lowercase
@require_torch
class __lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard transformers test-suite for the Llama architecture.

    NOTE(review): the obfuscated original used the undefined name
    ``lowercase_`` for all three mixin bases, one shared name for every class
    attribute and one shared name for every method (so unittest would only
    have discovered the last definition); canonical names restored.
    """

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        # Multi-label targets are float multi-hot vectors, one row per example.
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class __lowercase ( unittest.TestCase ):
    """Slow integration tests pinning logits/generations of the Llama-2 checkpoints.

    NOTE(review): the obfuscated original named every test method
    ``lowerCAmelCase_`` (each definition clobbered the previous one) and bound
    all locals to ``__A``; distinct canonical test names restored so unittest
    discovers each of them.
    """

    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""", device_map="""auto""")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""", device_map="""auto""")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""", device_map="""auto""")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""")
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""", device_map="""auto""")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("""Model is curently gated""")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = """Simply put, the theory of relativity states that """
        tokenizer = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""")
        input_ids = tokenizer.encode(prompt, return_tensors="""pt""")
        model = LlamaForCausalLM.from_pretrained(
            """meta-llama/Llama-2-13b-chat-hf""", device_map="""sequential""", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 199
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both constants below are bound to the same name
# `_lowerCamelCase`, so the second assignment clobbers the logger — looks
# like mechanical renaming damage; the intended names were presumably
# `logger` and `UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before use.
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
# Map of published checkpoints to their hosted config files.
_lowerCamelCase : str = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCamelCase_ ( PretrainedConfig ):
    """Configuration class for UMT5 models (a multilingual T5 variant).

    NOTE(review): the obfuscated original inherited from the undefined name
    ``__snake_case`` (restored to ``PretrainedConfig``, imported above), gave
    all three properties one shared name (so only the last survived), and —
    the real logic bug — validated ``len(**kwargs)`` instead of
    ``len(act_info)`` when checking ``feed_forward_proj``.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder depth defaults to a symmetric (same as encoder) stack.
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> gated act with base fn "gelu"; plain "relu" -> ungated.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        # Bug fix: the original tested len(**kwargs dict**) instead of len(act_info).
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        # Alias expected by the generic transformers tooling.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for UMT5.

    NOTE(review): the obfuscated original reused the class name of the config
    class above (clobbering it) and gave all three properties one shared name;
    restored the canonical name and the property names the ONNX exporter
    dispatches on (``inputs``, ``default_onnx_opset``, ``atol_for_validation``).
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis spec: axis index -> symbolic axis name.
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        # Export/runtime logits tolerance for validation.
        return 5e-4
| 87
|
from __future__ import annotations
def _lowercase ( a_list : list[int] , item : int ) -> bool:
    """Return True if *item* occurs in the ascending-sorted list *a_list*.

    Iterative binary search — O(log n) time, O(1) extra space. The previous
    version was doubly broken: both parameters shared one mangled name
    (a SyntaxError) and it recursed under the name ``binary_search``, which
    does not exist in this module. The slice-based recursion also copied
    half the list on every call.

    Precondition: ``a_list`` is sorted in ascending order.
    """
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False
if __name__ == "__main__":
    # CLI driver: read a comma-separated list of integers and a target
    # value, then report whether binary search finds the target.
    # NOTE(review): every assignment below targets the same name
    # ``__snake_case``, yet later lines read ``user_input``, ``sequence``,
    # ``target`` and ``not_str``, which are never bound — this script
    # raises NameError as written. The assignment targets look mangled;
    # confirm against the upstream version before relying on it.
    __snake_case = input("Enter numbers separated by comma:\n").strip()
    __snake_case = [int(item.strip()) for item in user_input.split(",")]
    __snake_case = int(input("Enter the number to be found in the list:\n").strip())
    __snake_case = "" if binary_search(sequence, target) else "not "
    print(F'''{target} was {not_str}found in {sequence}''')
| 386
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge in a 0/1-weighted adjacency-list graph.

    Restored from a mangled source: the BFS code in this module constructs
    ``Edge(destination_vertex, weight)``, but this class carried a
    placeholder name (shadowed by the graph class below, so unreachable)
    and its two fields shared one duplicated name — leaving the dataclass
    with a single field and making two-argument construction impossible.
    """

    destination_vertex: int
    weight: int
class lowercase_ :
    """Adjacency-list graph with 0/1 edge weights and a 0-1 BFS shortest path.

    NOTE(review): local names in this class look mangled — every assignment
    targets ``_A`` while later statements read names such as ``size``,
    ``vertex``, ``queue``, ``distances`` that are never bound, and the
    edge-adding method declares three parameters with the same name (a
    SyntaxError). Comments below describe the apparent intent; confirm
    against the upstream source before relying on them.
    """

    def __init__( self : Tuple , _UpperCAmelCase : int ):
        # Presumably: self._graph = [[] for _ in range(size)]; self._size = size
        # — TODO confirm, targets are mangled.
        _A = [[] for _ in range(_UpperCAmelCase )]
        _A = size

    def __getitem__( self : Optional[int] , _UpperCAmelCase : int ):
        # Iterate the outgoing edges of the given vertex.
        return iter(self._graph[vertex] )

    @property
    def lowerCAmelCase_ ( self : List[Any] ):
        # Number of vertices in the graph.
        return self._size

    def lowerCAmelCase_ ( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ):
        # Add a directed edge (from_vertex -> to_vertex); weight must be 0 or 1.
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(_UpperCAmelCase , _UpperCAmelCase ) )

    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
        # 0-1 BFS: deque-based shortest path — 0-weight edges are pushed to
        # the front of the queue, 1-weight edges to the back.
        _A = deque([start_vertex] )
        _A = [None] * self.size
        _A = 0
        while queue:
            _A = queue.popleft()
            _A = distances[current_vertex]
            # Vertices never reached carry distance None.
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                _A = current_distance + edge.weight
                _A = distances[edge.destination_vertex]
                # Skip when the recorded distance is already at least as good.
                if (
                    isinstance(_UpperCAmelCase , _UpperCAmelCase )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                _A = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 505
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowercase_ ( unittest.TestCase ):
    """Multi-GPU smoke tests: each test shells out to ``torchrun`` with one
    process per visible CUDA device and runs an accelerate helper script.

    NOTE(review): the path-setup method assigns everything to ``_A`` while
    later code reads ``mod_file`` and ``self.test_file_path`` /
    ``self.operation_file_path`` / ``self.data_loop_file_path`` — the
    assignment targets look mangled; confirm against the upstream
    accelerate test before relying on them.
    """

    def lowerCAmelCase_ ( self : List[str] ):
        # Resolve paths to the helper scripts shipped in accelerate.test_utils.
        _A = inspect.getfile(accelerate.test_utils )
        _A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        _A = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        _A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : List[Any] ):
        # Run the generic accelerate test script across all local GPUs.
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        _A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Run the tensor-ops test script across all local GPUs.
        print(F'''Found {torch.cuda.device_count()} devices.''' )
        _A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Re-run this very file under torchrun so its __main__ block executes
        # inside each worker.
        _A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowerCAmelCase_ ( self : List[str] ):
        # Run the data-loop script restricted to two devices via
        # CUDA_VISIBLE_DEVICES.
        print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        _A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    # Executed inside each torchrun worker: verifies that
    # ``Accelerator.pad_across_processes`` pads ragged per-process tensors
    # to a common length, both at the end (default) and at the front
    # (``pad_first=True``). Failures are accumulated and raised once at the
    # end so all checks run.
    # NOTE(review): every assignment targets the bare name ``a`` while later
    # lines read ``accelerator``, ``shape``, ``tensor``, ``tensora``,
    # ``error_msg`` and ``index`` — the targets look mangled; confirm
    # against the upstream accelerate test script.
    a = Accelerator()
    a = (accelerator.state.process_index + 2, 10)
    a = torch.randint(0, 10, shape).to(accelerator.device)
    a = ''''''
    a = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    a = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    a = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 505
| 1
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowercase__( UpperCAmelCase , unittest.TestCase ):
    """Tokenization tests for the Chinese RoFormer tokenizer, covering both
    the slow (Python) and fast (Rust) implementations.

    NOTE(review): the four class attributes all share the mangled name
    ``a``; in the upstream test they are ``tokenizer_class``,
    ``rust_tokenizer_class`` and two boolean flags — confirm before
    relying on them.
    """

    a :Union[str, Any] = RoFormerTokenizer
    a :str = RoFormerTokenizerFast
    a :List[str] = True
    a :Optional[Any] = True

    def _lowercase ( self : Optional[Any] ) -> int:
        super().setUp()

    def _lowercase ( self : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Any:
        # Slow tokenizer built from the pretrained Chinese RoFormer vocab.
        return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
        # Fast (Rust-backed) tokenizer from the same checkpoint.
        return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : Optional[Any] ) -> str:
        # Fixture: raw Chinese input and its expected space-joined tokens.
        lowercase_ = '''永和服装饰品有限公司,今天天气非常好'''
        lowercase_ = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
        return input_text, output_text

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        # Slow tokenizer: tokens and their ids must match the fixtures.
        lowercase_ = self.get_tokenizer()
        lowercase_ , lowercase_ = self.get_chinese_input_output_texts()
        lowercase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , output_text.split() )
        lowercase_ = tokens + [tokenizer.unk_token]
        lowercase_ = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : str ) -> str:
        # Fast tokenizer: same expectations as the slow path above.
        lowercase_ = self.get_rust_tokenizer()
        lowercase_ , lowercase_ = self.get_chinese_input_output_texts()
        lowercase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , output_text.split() )
        lowercase_ = tokens + [tokenizer.unk_token]
        lowercase_ = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    # The three common-mixin tests below are intentionally disabled for this
    # tokenizer (rjieba pre-tokenization breaks the generic assumptions).
    def _lowercase ( self : Dict ) -> str:
        pass

    def _lowercase ( self : Optional[int] ) -> List[Any]:
        pass

    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        pass
| 97
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
    """Test harness that builds small RegNet configs and inputs and runs
    shape checks for the model and image-classification head.

    NOTE(review): assignments throughout this class target mangled names
    (``lowercase__``) while later code reads ``self.batch_size`` etc., and
    ``__init__`` declares every parameter under one duplicated name — the
    source looks mangled; confirm against the upstream RegNet test.
    """

    def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=32 , lowerCamelCase__=3 , lowerCamelCase__=10 , lowerCamelCase__=[10, 20, 30, 40] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ) -> List[str]:
        '''simple docstring'''
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = num_channels
        lowercase__ = embeddings_size
        lowercase__ = hidden_sizes
        lowercase__ = depths
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_act
        lowercase__ = num_labels
        lowercase__ = scope
        # Number of stages is derived from the depths list.
        lowercase__ = len(lowerCamelCase__ )

    def A__ ( self ) -> Optional[int]:
        # Random pixel values (and labels when use_labels is set) plus a config.
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
        lowercase__ = self.get_config()
        return config, pixel_values, labels

    def A__ ( self ) -> Any:
        # Build a RegNetConfig mirroring this tester's hyper-parameters.
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
        # Base model: last hidden state is downsampled 32x spatially.
        lowercase__ = RegNetModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        lowercase__ = model(lowerCamelCase__ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
        # Classification head: logits are (batch, num_labels).
        lowercase__ = self.num_labels
        lowercase__ = RegNetForImageClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A__ ( self ) -> List[Any]:
        # Repackage prepared inputs into the kwargs dict the common mixin uses.
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common-mixin test suite for RegNet.

    RegNet is a pure conv-net: it has no attention, no input embeddings and
    no text modality, so several generic tests are skipped or overridden.
    NOTE(review): the class attributes all share the mangled name
    ``lowerCamelCase`` — upstream these are ``all_model_classes``,
    ``pipeline_model_mapping`` and boolean feature flags; confirm.
    """

    lowerCamelCase : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    lowerCamelCase : Any = (
        {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCamelCase : str = False
    lowerCamelCase : Optional[int] = False
    lowerCamelCase : str = False
    lowerCamelCase : Union[str, Any] = False

    def A__ ( self ) -> Union[str, Any]:
        # Wire up the model tester and a ConfigTester (no text modality).
        lowercase__ = RegNetModelTester(self )
        lowercase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )

    def A__ ( self ) -> List[Any]:
        # Exercise the standard config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__ ( self ) -> Any:
        # Intentionally a no-op (common-properties hook).
        return

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def A__ ( self ) -> List[Any]:
        '''simple docstring'''
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def A__ ( self ) -> str:
        '''simple docstring'''
        pass

    def A__ ( self ) -> str:
        # The forward signature must start with pixel_values.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(lowerCamelCase__ )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase__ )

    def A__ ( self ) -> Union[str, Any]:
        # Base-model shape test (delegates to the model tester).
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    def A__ ( self ) -> str:
        # Norm layers must initialize to weight=1, bias=0.
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(config=lowerCamelCase__ )
            for name, module in model.named_modules():
                if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    def A__ ( self ) -> Tuple:
        # Hidden-state outputs: one tensor per stage (+ embeddings), each
        # spatially downsampled by 2 at the first stage.
        def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            lowercase__ = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
            lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowercase__ = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase__ = layer_type
                lowercase__ = True
                check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase__ = True
                check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    def A__ ( self ) -> Any:
        # Image-classification head shape test.
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )

    @slow
    def A__ ( self ) -> Optional[Any]:
        # Smoke-test loading the first published checkpoint.
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = RegNetModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
def _A ( ):
    """Load the standard COCO two-cats fixture image used by the
    integration test below.

    The previous body bound the opened image to a mangled placeholder name
    and then returned the unbound name ``image`` (NameError at runtime);
    return the opened image directly instead.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class A ( unittest.TestCase ):
    """Slow integration test: run the first published RegNet checkpoint on a
    fixture image and compare the first three logits against recorded
    reference values."""

    @cached_property
    def A__ ( self ) -> Dict:
        # Image processor matching the checkpoint (None when vision deps
        # are unavailable).
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A__ ( self ) -> List[str]:
        # End-to-end forward pass and logits check.
        lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(**lowerCamelCase__ )
        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
        lowercase__ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 325
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase = logging.getLogger(__name__)
class __UpperCAmelCase (_UpperCAmelCase ):
    """RAG retriever variant for multi-worker (torch.distributed) training:
    the index lives on the main worker only; other workers gather their
    query embeddings to rank 0 and receive retrieval results via scatter
    over a dedicated gloo process group.

    NOTE(review): several assignments target mangled names
    (``_SCREAMING_SNAKE_CASE``) while later code reads attributes such as
    ``self.process_group`` — confirm against the upstream
    RagPyTorchDistributedRetriever before relying on details.
    """

    def __init__( self: Optional[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any]=None ):
        # Defer index initialization; the process group is created lazily in
        # init_retrieval below.
        super().__init__(
            UpperCAmelCase_ , question_encoder_tokenizer=UpperCAmelCase_ , generator_tokenizer=UpperCAmelCase_ , index=UpperCAmelCase_ , init_retrieval=UpperCAmelCase_ , )
        _SCREAMING_SNAKE_CASE = None

    def UpperCamelCase ( self: Any , UpperCAmelCase_: int ):
        # Create a gloo group for retrieval and build the index on rank 0 only.
        logger.info("""initializing retrieval""" )

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            _SCREAMING_SNAKE_CASE = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            _SCREAMING_SNAKE_CASE = str(distributed_port + 1 )
            _SCREAMING_SNAKE_CASE = dist.new_group(ranks=UpperCAmelCase_ , backend="""gloo""" )

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def UpperCamelCase ( self: Optional[int] ):
        # True on the rank-0 worker of the retrieval group.
        return dist.get_rank(group=self.process_group ) == 0

    def UpperCamelCase ( self: int , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=torch.floataa ):
        # Receive this worker's shard of a tensor scattered from rank 0.
        _SCREAMING_SNAKE_CASE = torch.empty(UpperCAmelCase_ , dtype=UpperCAmelCase_ )
        dist.scatter(UpperCAmelCase_ , src=0 , scatter_list=UpperCAmelCase_ , group=self.process_group )
        return target_tensor

    def UpperCamelCase ( self: int ):
        # Pick a network interface for gloo ("e..." matches eth*/en* names).
        _SCREAMING_SNAKE_CASE = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        _SCREAMING_SNAKE_CASE = next((addr for addr in addrs if addr.startswith("""e""" )) , UpperCAmelCase_ )
        return ifname

    def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: int ):
        # Retrieve docs for a batch of query embeddings. Single-process mode
        # queries the index directly; in distributed mode embeddings are
        # gathered to rank 0, retrieved there, then scattered back.
        if not dist.is_initialized():
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._main_retrieve(UpperCAmelCase_ , UpperCAmelCase_ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase_ )

        # distributed training
        _SCREAMING_SNAKE_CASE = dist.get_world_size(group=self.process_group )

        # gather logic
        _SCREAMING_SNAKE_CASE = None
        if self._is_main():
            _SCREAMING_SNAKE_CASE = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase_ )]
        dist.gather(torch.tensor(UpperCAmelCase_ ) , dst=0 , gather_list=UpperCAmelCase_ , group=self.process_group )

        # scatter logic
        _SCREAMING_SNAKE_CASE = question_hidden_states.shape[0]
        _SCREAMING_SNAKE_CASE = []
        _SCREAMING_SNAKE_CASE = []
        if self._is_main():
            assert len(UpperCAmelCase_ ) == world_size
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self._main_retrieve(torch.cat(UpperCAmelCase_ ).numpy() , UpperCAmelCase_ )
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase_ ), torch.tensor(UpperCAmelCase_ )
            _SCREAMING_SNAKE_CASE = self._chunk_tensor(UpperCAmelCase_ , UpperCAmelCase_ )
            _SCREAMING_SNAKE_CASE = self._chunk_tensor(UpperCAmelCase_ , UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = self._scattered(UpperCAmelCase_ , [n_queries, n_docs] , target_type=torch.intaa )
        _SCREAMING_SNAKE_CASE = self._scattered(UpperCAmelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase_ )
| 701
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
    """Integration test for Flax mT5-small: teacher-forced cross-entropy on a
    tiny example must match a recorded reference score."""

    @slow
    def UpperCamelCase ( self: Tuple ):
        # Compute the per-example summed NLL and compare to the stored value.
        _SCREAMING_SNAKE_CASE = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
        _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""" )

        _SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
        _SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids

        # Teacher forcing: decoder inputs are the labels shifted right.
        _SCREAMING_SNAKE_CASE = shift_tokens_right(UpperCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )

        _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ).logits
        _SCREAMING_SNAKE_CASE = optax.softmax_cross_entropy(UpperCAmelCase_ , onehot(UpperCAmelCase_ , logits.shape[-1] ) ).mean()

        _SCREAMING_SNAKE_CASE = -(labels.shape[-1] * loss.item())

        _SCREAMING_SNAKE_CASE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 569
| 0
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class __lowerCamelCase ( A__ ):
    r"""Configuration for the ALIGN text tower (a BERT-style encoder).

    Restored from a mangled source: the ``__init__`` parameters all shared
    one placeholder name (a SyntaxError) and the attribute assignments had
    lost their ``self.`` targets, so the config stored nothing. Parameter
    names were recovered from the values the body reads/stores.
    """

    # Read by ``from_pretrained`` below via ``cls.model_type``.
    model_type = """align_text_model"""

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def lowerCamelCase ( cls : str , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """Load this text config, unwrapping the nested ``text_config`` dict
        when the checkpoint is a full AlignConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict , **kwargs )
class __lowerCamelCase ( A__ ):
    r"""Configuration for the ALIGN vision tower (EfficientNet-style encoder).

    Restored from a mangled source: every ``__init__`` parameter shared one
    placeholder name (a SyntaxError) and the attribute assignments had lost
    their ``self.`` targets; parameter names were recovered from the values
    the body stores.
    """

    # Read by ``from_pretrained`` below via ``cls.model_type``.
    model_type = """align_vision_model"""

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 6_00,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 1_12, 1_92],
        out_channels: List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 25_60,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Four hidden states are emitted per block repeat.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def lowerCamelCase ( cls : Optional[Any] , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """Load this vision config, unwrapping the nested ``vision_config``
        dict when the checkpoint is a full AlignConfig."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict , **kwargs )
class __lowerCamelCase ( A__ ):
    r"""Top-level ALIGN configuration combining text and vision sub-configs.

    Restored from a mangled source: attribute assignments had lost their
    ``self.`` targets and the class attributes had lost their names.
    """

    model_type = """align"""
    # NOTE(review): the second mangled class attribute was a bare ``True``;
    # ``is_composition`` is the most plausible original — confirm upstream.
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=6_40,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values." )

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )

        # NOTE(review): in this mangled module all three config classes share
        # one name; these constructor references use the upstream class names.
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def lowerCamelCase ( cls : str , text_config: AlignTextConfig , vision_config: AlignVisionConfig , **kwargs ):
        """Build an AlignConfig from instantiated text/vision sub-configs.

        NOTE(review): shadowed by the ``to_dict``-style method below, which
        shares the same mangled name in this module.
        """
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def lowerCamelCase ( self : str ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 610
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCamelCase ( __UpperCamelCase ) -> torch.Tensor:
    """Row-wise Shannon entropy of ``softmax(x)`` along dim 1.

    For each row: ``H = log(sum_i exp(x_i)) - sum_i x_i*exp(x_i) / sum_i exp(x_i)``,
    which equals ``-sum_i p_i log p_i`` with ``p = softmax(x)``. Used by the
    DeeBERT early-exit criterion below.

    The previous body never bound the two sums to ``A``/``B`` (assignments
    targeted mangled placeholders) and applied ``torch.log`` to the raw
    input instead of the exp-sum; both are fixed here.
    """
    exp_x = torch.exp(__UpperCamelCase )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(__UpperCamelCase * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class __lowerCamelCase ( nn.Module ):
    """DeeBERT encoder: a stack of BERT layers where each layer is followed
    by a "highway" early-exit head. At inference, if a highway head's
    softmax entropy falls below its per-layer threshold, a
    ``HighwayException`` is raised to stop computation early.

    NOTE(review): assignments throughout target mangled names
    (``lowerCAmelCase_``) while later code reads ``self.layer``,
    ``self.highway``, ``self.early_exit_entropy`` etc., and ``__init__``
    reads ``config`` though its parameter is named ``a_`` — the source
    looks mangled; confirm details against the upstream DeeBertEncoder.
    """

    def __init__( self : Optional[Any] , a_ : Dict ):
        super().__init__()

        lowerCAmelCase_ : Dict = config.output_attentions
        lowerCAmelCase_ : Any = config.output_hidden_states
        # One BertLayer and one highway (early-exit) head per hidden layer.
        lowerCAmelCase_ : Dict = nn.ModuleList([BertLayer(a_ ) for _ in range(config.num_hidden_layers )] )
        lowerCAmelCase_ : int = nn.ModuleList([BertHighway(a_ ) for _ in range(config.num_hidden_layers )] )
        # -1 disables early exit for a layer until thresholds are set.
        lowerCAmelCase_ : Optional[Any] = [-1 for _ in range(config.num_hidden_layers )]

    def lowerCamelCase ( self : List[Any] , a_ : Tuple ):
        # Set the early-exit entropy threshold: a scalar applies to every
        # layer, otherwise a per-layer sequence is expected.
        if (type(a_ ) is float) or (type(a_ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                lowerCAmelCase_ : Dict = x
        else:
            lowerCAmelCase_ : Optional[Any] = x

    def lowerCamelCase ( self : List[str] , a_ : str ):
        # Copy the main pooler's weights into every highway head's pooler.
        lowerCAmelCase_ : Any = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def lowerCamelCase ( self : Any , a_ : List[str] , a_ : Tuple=None , a_ : Optional[Any]=None , a_ : Tuple=None , a_ : Optional[Any]=None , ):
        # Forward pass: run each BERT layer, evaluate its highway head, and
        # (at inference) raise HighwayException when the head's entropy
        # drops below that layer's threshold.
        lowerCAmelCase_ : str = ()
        lowerCAmelCase_ : int = ()
        lowerCAmelCase_ : Optional[Any] = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                lowerCAmelCase_ : List[str] = all_hidden_states + (hidden_states,)
            lowerCAmelCase_ : Optional[int] = layer_module(
                a_ , a_ , head_mask[i] , a_ , a_ )
            lowerCAmelCase_ : str = layer_outputs[0]
            if self.output_attentions:
                lowerCAmelCase_ : Tuple = all_attentions + (layer_outputs[1],)

            lowerCAmelCase_ : Optional[int] = (hidden_states,)
            if self.output_hidden_states:
                lowerCAmelCase_ : Any = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                lowerCAmelCase_ : Optional[Any] = current_outputs + (all_attentions,)

            lowerCAmelCase_ : Tuple = self.highway[i](a_ )
            # logits, pooled_output

            if not self.training:
                lowerCAmelCase_ : Union[str, Any] = highway_exit[0]
                lowerCAmelCase_ : int = entropy(a_ )
                lowerCAmelCase_ : str = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                lowerCAmelCase_ : List[str] = all_highway_exits + (highway_exit,)

                # Confident enough at this layer: abort the stack via exception.
                if highway_entropy < self.early_exit_entropy[i]:
                    lowerCAmelCase_ : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(a_ , i + 1 )
            else:
                lowerCAmelCase_ : Optional[Any] = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            lowerCAmelCase_ : Any = all_hidden_states + (hidden_states,)

        lowerCAmelCase_ : Union[str, Any] = (hidden_states,)
        if self.output_hidden_states:
            lowerCAmelCase_ : Any = outputs + (all_hidden_states,)
        if self.output_attentions:
            lowerCAmelCase_ : Dict = outputs + (all_attentions,)
        lowerCAmelCase_ : List[str] = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , A__ , )
class __lowerCamelCase ( A__ ):
    """DeeBERT backbone: BERT embeddings + an encoder with per-layer highway
    (early-exit) heads + the standard pooler.

    NOTE(review): identifiers in this block look machine-mangled — every
    local is assigned to ``lowerCAmelCase_`` while bodies read names
    (``config``, ``value``, ``heads_to_prune``, ``outputs`` ...) that are
    never bound, and the forward-style method declares the parameter ``a_``
    eight times (a SyntaxError).  Code is kept byte-identical; confirm
    against the original DeeBERT implementation before executing.
    """

    def __init__( self : Optional[Any] , a_ : Optional[int] ):
        # Standard BERT sub-modules plus the early-exit encoder.
        super().__init__(a_ )
        lowerCAmelCase_ : Any = config
        lowerCAmelCase_ : int = BertEmbeddings(a_ )
        lowerCAmelCase_ : List[Any] = DeeBertEncoder(a_ )
        lowerCAmelCase_ : Optional[Any] = BertPooler(a_ )
        self.init_weights()

    def lowerCamelCase ( self : Tuple ):
        # Share the pooler's weights with every highway exit head.
        self.encoder.init_highway_pooler(self.pooler )

    def lowerCamelCase ( self : Optional[int] ):
        # Return the input (word) embedding table.
        return self.embeddings.word_embeddings

    def lowerCamelCase ( self : int , a_ : Union[str, Any] ):
        # Setter for the input embeddings.
        lowerCAmelCase_ : Dict = value

    def lowerCamelCase ( self : Optional[int] , a_ : List[Any] ):
        # Prune attention heads; expects {layer_index: [head indices, ...]}.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(a_ )

    @add_start_docstrings_to_model_forward(a_ )
    def lowerCamelCase ( self : str , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : str=None , a_ : Dict=None , a_ : List[str]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Any=None , ):
        # Forward pass: validates inputs, builds default masks, then runs
        # embeddings -> early-exit encoder -> pooler.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            lowerCAmelCase_ : List[str] = input_ids.size()
        elif inputs_embeds is not None:
            lowerCAmelCase_ : Optional[Any] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        lowerCAmelCase_ : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks / token types when the caller omitted them.
        if attention_mask is None:
            lowerCAmelCase_ : int = torch.ones(a_ , device=a_ )
        if encoder_attention_mask is None:
            lowerCAmelCase_ : Any = torch.ones(a_ , device=a_ )
        if token_type_ids is None:
            lowerCAmelCase_ : Tuple = torch.zeros(a_ , dtype=torch.long , device=a_ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        lowerCAmelCase_ : torch.Tensor = self.get_extended_attention_mask(a_ , a_ , a_ )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            lowerCAmelCase_ : Dict = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            lowerCAmelCase_ : List[Any] = encoder_attention_mask[:, None, None, :]
        lowerCAmelCase_ : str = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        lowerCAmelCase_ : Union[str, Any] = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        lowerCAmelCase_ : Tuple = self.get_head_mask(a_ , self.config.num_hidden_layers )
        lowerCAmelCase_ : Tuple = self.embeddings(
            input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
        lowerCAmelCase_ : Dict = self.encoder(
            a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
        lowerCAmelCase_ : Dict = encoder_outputs[0]
        lowerCAmelCase_ : int = self.pooler(a_ )
        lowerCAmelCase_ : Optional[Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __lowerCamelCase ( A__ ):
    """Control-flow exception used to stop the encoder at a highway exit.

    Carries the exit head's output (``message``) and the 1-based index of
    the layer that triggered the exit (``exit_layer``).

    NOTE(review): both parameters are declared as ``a_`` (a SyntaxError in
    Python) and the body reads the unbound names ``message``/``exit_layer``
    — identifiers look machine-mangled; code kept byte-identical.
    """

    def __init__( self : int , a_ : Optional[int] , a_ : List[str] ):
        lowerCAmelCase_ : Optional[Any] = message
        lowerCAmelCase_ : Dict = exit_layer # start from 1!
class __lowerCamelCase ( nn.Module ):
    """A single highway exit head: pooler + dropout + linear classifier over
    an intermediate layer's hidden states.

    NOTE(review): locals are machine-mangled (every assignment targets
    ``lowerCAmelCase_``) while the bodies read unbound names (``config``,
    ``encoder_outputs``, ``bmodel_output``, ``logits`` ...); code kept
    byte-identical — confirm against the original DeeBERT source.
    """

    def __init__( self : Any , a_ : Union[str, Any] ):
        super().__init__()
        lowerCAmelCase_ : Optional[int] = BertPooler(a_ )
        lowerCAmelCase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
        lowerCAmelCase_ : Optional[Any] = nn.Linear(config.hidden_size , config.num_labels )

    def lowerCamelCase ( self : List[Any] , a_ : int ):
        # Pooler
        lowerCAmelCase_ : Any = encoder_outputs[0]
        lowerCAmelCase_ : List[Any] = self.pooler(a_ )
        # "return" pooler_output
        # BertModel
        lowerCAmelCase_ : str = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        lowerCAmelCase_ : Union[str, Any] = bmodel_output[1]
        lowerCAmelCase_ : Tuple = self.dropout(a_ )
        lowerCAmelCase_ : str = self.classifier(a_ )
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , A__ , )
class __lowerCamelCase ( A__ ):
    """DeeBERT sequence classifier: DeeBertModel + dropout + linear head, with
    per-layer highway losses and entropy-based early-exit handling.

    NOTE(review): locals are machine-mangled (all assigned to
    ``lowerCAmelCase_``) while the body reads unbound names (``outputs``,
    ``logits``, ``loss``, ``highway_losses`` ...), and the forward-style
    signature repeats the parameter ``a_`` (a SyntaxError).  Code kept
    byte-identical; confirm against the original DeeBERT example.
    """

    def __init__( self : List[str] , a_ : Optional[Any] ):
        super().__init__(a_ )
        lowerCAmelCase_ : Dict = config.num_labels
        lowerCAmelCase_ : List[Any] = config.num_hidden_layers
        lowerCAmelCase_ : str = DeeBertModel(a_ )
        lowerCAmelCase_ : str = nn.Dropout(config.hidden_dropout_prob )
        lowerCAmelCase_ : Tuple = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    @add_start_docstrings_to_model_forward(a_ )
    def lowerCamelCase ( self : List[str] , a_ : Union[str, Any]=None , a_ : Any=None , a_ : List[Any]=None , a_ : Optional[int]=None , a_ : List[str]=None , a_ : Dict=None , a_ : Any=None , a_ : Optional[int]=-1 , a_ : Union[str, Any]=False , ):
        lowerCAmelCase_ : Dict = self.num_layers
        try:
            # Full forward pass; a highway head may abort it early by raising
            # HighwayException.
            lowerCAmelCase_ : Optional[int] = self.bert(
                a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            lowerCAmelCase_ : Union[str, Any] = outputs[1]
            lowerCAmelCase_ : List[Any] = self.dropout(a_ )
            lowerCAmelCase_ : Dict = self.classifier(a_ )
            lowerCAmelCase_ : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # Early exit: the exception carries the highway head's outputs and
            # the (1-based) layer index it fired at.
            lowerCAmelCase_ : Optional[int] = e.message
            lowerCAmelCase_ : str = e.exit_layer
            lowerCAmelCase_ : Union[str, Any] = outputs[0]
        if not self.training:
            lowerCAmelCase_ : List[str] = entropy(a_ )
            lowerCAmelCase_ : str = []
            lowerCAmelCase_ : List[str] = []
        if labels is not None:
            # Regression (num_labels == 1) uses MSE, otherwise cross entropy.
            if self.num_labels == 1:
                # We are doing regression
                lowerCAmelCase_ : int = MSELoss()
                lowerCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                lowerCAmelCase_ : int = CrossEntropyLoss()
                lowerCAmelCase_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            lowerCAmelCase_ : Any = []
            for highway_exit in outputs[-1]:
                lowerCAmelCase_ : Optional[Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(a_ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    lowerCAmelCase_ : Tuple = MSELoss()
                    lowerCAmelCase_ : Dict = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    lowerCAmelCase_ : Optional[Any] = CrossEntropyLoss()
                    lowerCAmelCase_ : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(a_ )
            if train_highway:
                lowerCAmelCase_ : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCAmelCase_ : List[str] = (loss,) + outputs
        if not self.training:
            lowerCAmelCase_ : List[str] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowerCAmelCase_ : Optional[int] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 610
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def __lowerCAmelCase (num , den ):
    """Return True if num/den is a "digit cancelling" fraction.

    A two-digit fraction num/den is digit-cancelling when the last digit of
    ``num`` equals the first digit of ``den`` and naively cancelling that
    shared digit leaves the value unchanged, e.g. 49/98 == 4/8.

    Fix: the original declared the same parameter name twice (a SyntaxError)
    while the body read the unbound names ``num``/``den``; the parameters now
    match the body.  Existing callers pass both arguments positionally, so
    the call shape is unchanged.

    Raises ZeroDivisionError when ``den`` is a multiple of 10 — callers are
    expected to filter that case first (as the search loop below does with
    ``den % 10 != 0``).
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def __lowerCAmelCase (__lowerCAmelCase ):
    """Return all digit-cancelling fractions "num/den" whose numerator has at
    most ``__lowerCAmelCase`` digits (2 for the classic Project Euler 33
    search space of two-digit fractions).

    Fix: the original assigned every local to the same mangled name
    (``_UpperCAmelCase``) and then read the unbound names ``solutions`` and
    ``den``, and iterated ``range(digit_len, digit_len)`` (always empty) —
    restored coherent locals and the intended search bounds.  The single
    positional parameter is unchanged.

    NOTE(review): ``is_digit_cancelling`` is not defined under that name in
    this fragment (the predicate above is mangled) — confirm the helper name
    before running.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * __lowerCAmelCase )
    for num in range(den , last_digit ):
        while den <= 99:
            # den must not end in 0 (cancelling the shared digit would
            # divide by zero) and num/den must share the cancellable digit.
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F"""{num}/{den}""" )
            den += 1
        # Reset the denominator search for the next numerator.
        den = 10
    return solutions
def __lowerCAmelCase (__lowerCAmelCase = 2 ):
    """Solve Project Euler 33: multiply all digit-cancelling fractions and
    return the denominator of the product when given in lowest terms.

    Fix: the original stored every intermediate in one mangled name
    (``_UpperCAmelCase``) and then read the unbound names ``result`` and
    ``frac`` — restored coherent locals.  Interface (one positional argument
    with default 2) is unchanged.

    NOTE(review): ``fraction_list`` is not defined under that name in this
    fragment (the generator above is mangled) — confirm before running.
    """
    result = 1.0
    for fraction in fraction_list(__lowerCAmelCase ):
        frac = Fraction(fraction )
        # Accumulate den/num so the final product is the answer's denominator.
        result *= frac.denominator / frac.numerator
    return int(result )
# Script entry point for the Project Euler 33 solution above.
# NOTE(review): ``solution`` is not defined under that name in this fragment
# (the function above is mangled) — confirm before running.
if __name__ == "__main__":
    print(solution())
| 709
|
'''simple docstring'''
from __future__ import annotations
# Example graph as an adjacency list: vertex -> list of neighbouring vertices.
lowerCamelCase__ = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
class lowerCAmelCase__ :
    """Breadth-first search over an unweighted graph, recording each node's
    parent in the BFS tree so shortest paths (in edge count) from the source
    vertex can be reconstructed.

    NOTE(review): ``__init__`` declares the parameter ``lowerCamelCase__``
    twice (a SyntaxError) and several bodies read names (``graph``,
    ``source_vertex``, ``vertex``, ``queue``, ``visited``) never bound under
    the mangled locals — code kept byte-identical.
    """

    def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
        """Store the adjacency list and the BFS source vertex."""
        _UpperCAmelCase : Dict = graph
        # mapping node to its parent in resulting breadth first tree
        _UpperCAmelCase : dict[str, str | None] = {}
        _UpperCAmelCase : List[Any] = source_vertex

    def lowerCAmelCase__ ( self : Optional[int] ) ->None:
        """Run BFS from the source, filling the parent map."""
        _UpperCAmelCase : List[Any] = {self.source_vertex}
        _UpperCAmelCase : List[Any] = None
        _UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
        while queue:
            _UpperCAmelCase : int = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(lowerCamelCase__ )
                    _UpperCAmelCase : List[Any] = vertex
                    queue.append(lowerCamelCase__ )

    def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
        """Return the path source->...->target as "a->b->c" (recursive).

        Raises ValueError when the target was not reached by the BFS.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        _UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
        if target_vertex_parent is None:
            _UpperCAmelCase : Tuple = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(lowerCamelCase__ )
        return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
# Demo: build the BFS tree from source 'G' and print shortest paths; the
# 'Foo' query is expected to raise ValueError.
# NOTE(review): ``Graph`` and ``g`` are not defined under those names in this
# fragment (the class above is mangled) — confirm before running.
if __name__ == "__main__":
    lowerCamelCase__ = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
| 40
| 0
|
'''simple docstring'''
import os
def UpperCAmelCase__ ( ) -> int:
    """Solve Project Euler 22: sum, over the alphabetically sorted names in
    ``p022_names.txt``, of each name's alphabetical-letter score weighted by
    its 1-based position in the sorted list.

    Fixes:
    - the original's ``List[str]`` return annotation referenced an unimported
      name (NameError at definition time) and was wrong anyway — the function
      computes an int total;
    - ``os.path.dirname(SCREAMING_SNAKE_CASE_)``, ``names``, ``name`` and
      ``letter`` were unbound mangled names — restored coherent locals
      (``__file__`` is used to anchor the data file; confirm against the
      original solution's layout);
    - ``name_score`` is reset after each name instead of accumulating across
      names.
    """
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('\"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            # 'A' is worth 1, 'B' is worth 2, ... (ord('A') == 65).
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
# Script entry point for the Project Euler 22 solution above.
# NOTE(review): ``solution`` is not defined under that name in this fragment.
if __name__ == "__main__":
    print(solution())
| 13
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a__ :
    """Model tester for TFLayoutLMv3: builds a small config plus random
    inputs and runs shape assertions for the base model and task heads.

    NOTE(review): identifiers look machine-mangled — several method
    signatures repeat the parameter name ``lowercase`` (a SyntaxError) and
    every local is assigned to ``A__`` while bodies read unbound names
    (``parent``, ``bbox``, ``model``, ``result`` ...); code kept
    byte-identical.
    """

    def __init__( self , lowercase , lowercase=2 , lowercase=3 , lowercase=4 , lowercase=2 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=36 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=6 , lowercase=6 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=1000 , ) -> Any:
        """Store test hyper-parameters and derive sequence lengths."""
        A__ = parent
        A__ = batch_size
        A__ = num_channels
        A__ = image_size
        A__ = patch_size
        A__ = is_training
        A__ = use_input_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = coordinate_size
        A__ = shape_size
        A__ = num_labels
        A__ = num_choices
        A__ = scope
        A__ = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        A__ = text_seq_length
        A__ = (image_size // patch_size) ** 2 + 1
        A__ = self.text_seq_length + self.image_seq_length

    def UpperCamelCase ( self ) -> Optional[int]:
        """Build a config plus random input ids, legal bounding boxes, image."""
        A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        A__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        A__ = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    A__ = bbox[i, j, 3]
                    A__ = bbox[i, j, 1]
                    A__ = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    A__ = bbox[i, j, 2]
                    A__ = bbox[i, j, 0]
                    A__ = tmp_coordinate
        A__ = tf.constant(lowercase )
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ = None
        if self.use_input_mask:
            A__ = random_attention_mask([self.batch_size, self.text_seq_length] )
        A__ = None
        if self.use_token_type_ids:
            A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        A__ = None
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        A__ = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
        """Check base-model output shapes for text+image, text-only, image-only."""
        A__ = TFLayoutLMvaModel(config=lowercase )
        # text + image
        A__ = model(lowercase , pixel_values=lowercase , training=lowercase )
        A__ = model(
            lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , training=lowercase , )
        A__ = model(lowercase , bbox=lowercase , pixel_values=lowercase , training=lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        A__ = model(lowercase , training=lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        A__ = model({"pixel_values": pixel_values} , training=lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        A__ = self.num_labels
        A__ = TFLayoutLMvaForSequenceClassification(config=lowercase )
        A__ = model(
            lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , training=lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
        """Check the token-classification head's logits shape."""
        A__ = self.num_labels
        A__ = TFLayoutLMvaForTokenClassification(config=lowercase )
        A__ = model(
            lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , training=lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
        """Check the question-answering head's start/end logits shapes."""
        A__ = 2
        A__ = TFLayoutLMvaForQuestionAnswering(config=lowercase )
        A__ = model(
            lowercase , bbox=lowercase , pixel_values=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , training=lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCamelCase ( self ) -> str:
        """Return (config, inputs_dict) for the common mixin tests."""
        A__ = self.prepare_config_and_inputs()
        ((A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__) , (A__)) = config_and_inputs
        A__ = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class a__ ( snake_case , snake_case , unittest.TestCase ):
    """Common TF tests for the LayoutLMv3 model family (base + task heads).

    NOTE(review): several signatures below repeat the parameter name
    ``lowercase`` (a SyntaxError) and locals are all assigned to ``A__``
    while bodies read unbound names — identifiers look machine-mangled;
    code kept byte-identical.
    """

    # Model classes / pipeline mapping exercised by the shared mixins.
    __lowerCamelCase = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __lowerCamelCase = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __lowerCamelCase = False
    __lowerCamelCase = False
    __lowerCamelCase = False

    def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
        # Unconditionally skip pipeline tests for this model.
        return True

    def UpperCamelCase ( self , lowercase , lowercase , lowercase=False ) -> dict:
        """Expand inputs for multiple-choice models and add labels on demand."""
        A__ = copy.deepcopy(lowercase )
        if model_class in get_values(lowercase ):
            A__ = {
                k: tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(lowercase , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowercase ):
                A__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase ):
                A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase ):
                A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(lowercase ):
                A__ = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def UpperCamelCase ( self ) -> Tuple:
        # Test fixtures: model tester + config tester.
        A__ = TFLayoutLMvaModelTester(self )
        A__ = ConfigTester(self , config_class=lowercase , hidden_size=37 )

    def UpperCamelCase ( self ) -> List[str]:
        self.config_tester.run_common_tests()

    def UpperCamelCase ( self ) -> List[str]:
        """Exercise hf_compute_loss via kwargs, masked labels, dict and tuple inputs."""
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(lowercase )
            if getattr(lowercase , "hf_compute_loss" , lowercase ):
                # The number of elements in the loss should be the same as the number of elements in the label
                A__ = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
                A__ = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase )[0]
                ]
                A__ = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                A__ = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
                A__ = prepared_for_class.pop("input_ids" )
                A__ = model(lowercase , **lowercase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                A__ = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
                A__ = prepared_for_class.pop("input_ids" )
                if "labels" in prepared_for_class:
                    A__ = prepared_for_class["labels"].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        A__ = -100
                        A__ = tf.convert_to_tensor(lowercase )
                        A__ = model(lowercase , **lowercase )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                A__ = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
                A__ = model(lowercase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                A__ = self._prepare_for_class(inputs_dict.copy() , lowercase , return_labels=lowercase )
                # Get keys that were added with the _prepare_for_class function
                A__ = prepared_for_class.keys() - inputs_dict.keys()
                A__ = inspect.signature(model.call ).parameters
                A__ = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                A__ = {0: "input_ids"}
                for label_key in label_keys:
                    A__ = signature_names.index(lowercase )
                    A__ = label_key
                A__ = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                A__ = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    A__ = prepared_for_class[value]
                A__ = tuple(lowercase )
                # Send to model
                A__ = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def UpperCamelCase ( self ) -> List[str]:
        # Base-model shape checks.
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    def UpperCamelCase ( self ) -> int:
        # Base-model checks across position-embedding types.
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            A__ = type
            self.model_tester.create_and_check_model(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    def UpperCamelCase ( self ) -> int:
        # Sequence-classification head check.
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Token-classification head check.
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    def UpperCamelCase ( self ) -> Optional[int]:
        # Question-answering head check.
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )

    @slow
    def UpperCamelCase ( self ) -> str:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = TFLayoutLMvaModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
def lowerCAmelCase__ ( ) -> Tuple:
    """Load the COCO sample image used by the slow integration test below.

    NOTE(review): the ``Tuple`` return annotation is unresolved in this
    fragment (typing is not imported) and the body assigns to the mangled
    name ``A__`` but returns the unbound name ``image`` — confirm against
    the original test file; code kept byte-identical.
    """
    A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
class a__ ( unittest.TestCase ):
    """Slow integration test: run microsoft/layoutlmv3-base on one image and
    check the last hidden state's shape plus a 3x3 golden slice.

    NOTE(review): locals are assigned to the mangled name ``A__`` while the
    bodies read unbound names (``lowercase``, ``model``, ``outputs`` ...);
    code kept byte-identical.
    """

    @cached_property
    def UpperCamelCase ( self ) -> Optional[Any]:
        # Image processor for the test (OCR flag argument is a mangled name).
        return LayoutLMvaImageProcessor(apply_ocr=lowercase ) if is_vision_available() else None

    @slow
    def UpperCamelCase ( self ) -> Tuple:
        A__ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=lowercase , return_tensors="tf" ).pixel_values
        A__ = tf.constant([[1, 2]] )
        A__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        A__ = model(input_ids=lowercase , bbox=lowercase , pixel_values=lowercase , training=lowercase )
        # verify the logits
        A__ = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , lowercase )
        A__ = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ) )
| 514
| 0
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _a ( unittest.TestCase ):
    """Golden-output tests for JukeboxTokenizer: tokenizing the fixed
    artist/genre/lyrics metadata must reproduce the recorded token tensors
    for the 1b-lyrics and 5b-lyrics checkpoints.

    NOTE(review): locals are assigned to mangled names
    (``SCREAMING_SNAKE_CASE``) while the assertions read ``tokens`` /
    ``EXPECTED_OUTPUT`` (never bound under those names) — code and the
    golden token data are kept byte-identical.
    """

    # Tokenizer class under test and the fixed metadata it is fed.
    A : Tuple = JukeboxTokenizer
    A : Optional[Any] = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def UpperCamelCase_ ( self ):
        """1b-lyrics checkpoint: compare tokenized metadata to golden tensors."""
        import torch

        SCREAMING_SNAKE_CASE : Dict = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
        SCREAMING_SNAKE_CASE : int = tokenizer(**self.metas )['input_ids']
        # fmt: off
        SCREAMING_SNAKE_CASE : Dict = [
            torch.tensor([[
                0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
            torch.tensor([[0, 0, 0, 1_069, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )

    @require_torch
    def UpperCamelCase_ ( self ):
        """5b-lyrics checkpoint: compare tokenized metadata to golden tensors."""
        import torch

        SCREAMING_SNAKE_CASE : Dict = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
        SCREAMING_SNAKE_CASE : str = tokenizer(**self.metas )['input_ids']
        # fmt: off
        SCREAMING_SNAKE_CASE : Dict = [
            torch.tensor([[
                0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
| 700
|
'''simple docstring'''
class _a:
    """A node of a trie (prefix tree) over lowercase words.

    NOTE(review): restored from obfuscated source in which ``__init__`` bound
    throwaway locals instead of ``self.nodes``/``self.is_leaf`` and all four
    methods shared one name; attribute and method names are recovered from the
    call sites below (``insert_many``, ``insert``, ``find``, ``delete``).
    """

    def __init__(self):
        # Mapping from char to child node; is_leaf marks the end of a stored word.
        self.nodes: dict[str, "TrieNode"] = {}
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating child nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was previously inserted (exact match only)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning branches that become empty."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when *curr* has become empty and may be pruned by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


# The rest of the module refers to this class as ``TrieNode``.
TrieNode = _a
def print_words(node: "TrieNode", word: str) -> None:
    """Depth-first print of every word stored under *node*.

    *word* is the prefix accumulated on the path from the root to *node*.
    NOTE(review): the obfuscated original named both parameters identically
    (a SyntaxError); the body and the recursive call already used
    ``print_words``/``word``, from which the signature is restored.
    """
    if node.is_leaf:
        print(word, end=' ')
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word list; return True if all checks pass."""
    words = 'banana bananas bandana band apple all beast'.split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('banana')
    assert not root.find('bandanas')
    assert not root.find('apps')
    assert root.find('apple')
    assert root.find('all')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by whether the associated check passed."""
    print(str(msg), 'works!' if passes else 'doesn\'t work :(')
def pytests() -> None:
    """Assert-style entry point so a test runner fails loudly if the trie is broken."""
    assert test_trie()
def main() -> None:
    """Run the trie self-test and report the outcome on stdout."""
    print_results('Testing trie functionality', test_trie())
if __name__ == "__main__":
main()
| 508
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds tiny XGLM configs and inputs for the model tests below.

    NOTE(review): restored from obfuscated source in which every
    ``self.<attr> = ...`` assignment rebound the same local name, leaving the
    instance unconfigured; attribute names are recovered from their
    right-hand sides and from the reads in the methods below.  The class name
    comes from the ``TFXGLMModelTester(self)`` call site.
    """

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None  # NOTE(review): original bound an unnamed None; assumed to be `scope`
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        # Full-size config, used only by slow/integration paths.
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        # Clip ids to a tiny range so generation tests stay deterministic and fast.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TF XGLM.

    NOTE(review): class/attribute/method names restored from obfuscated
    source following the TFModelTesterMixin conventions; the three boolean
    flags were anonymized — ``test_onnx``/``test_missing_keys``/``test_pruning``
    is an assumption to confirm against upstream.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests that run real generation with facebook/xglm-564M.

    NOTE(review): locals restored from obfuscated source (every assignment
    rebound one name); variable names are recovered from their later reads.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 263
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K); the function body reads this constant as ``T``


def UpperCamelCase_(
    donor_conc: float,  # donor concentration (cm^-3)
    acceptor_conc: float,  # acceptor concentration (cm^-3)
    intrinsic_conc: float,  # intrinsic concentration (cm^-3)
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    Computes ``kT/q * ln(Nd * Na / ni^2)`` with the Boltzmann constant and the
    electron-volt conversion taken from ``scipy.constants``.

    Raises:
        ValueError: if any concentration is non-positive, or if the donor or
            acceptor concentration does not exceed the intrinsic one.

    NOTE(review): the obfuscated original declared all three parameters with
    the same name (a SyntaxError); names are restored from the body's uses.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the obfuscated source bound both the logger and the archive
# map to the same name, so the map silently clobbered the logger.
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    r"""Configuration for PEGASUS models.

    Stores the hyper-parameters of the encoder/decoder stacks and delegates
    special-token bookkeeping to :class:`PretrainedConfig`.

    NOTE(review): restored from obfuscated source whose ``__init__`` declared
    every parameter as ``_A`` (a SyntaxError) and bound locals instead of
    instance attributes; parameter names/order are recovered from the visible
    defaults (50265, 1024, 12, 4096, 16, ...) and the attribute reads below.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        # Alias kept for architecture-agnostic code paths.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias kept for architecture-agnostic code paths.
        return self.d_model
| 534
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute-pixel box [x0, y0, x1, y1] to the 0-1000 range used by LayoutLM-style models.

    NOTE(review): the obfuscated original named all three parameters ``__a``
    (a SyntaxError); the name and signature are restored from the call site
    ``normalize_box(box, image_width, image_height)`` below.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on *image*; return (words, boxes) with boxes normalized to 0-1000.

    NOTE(review): locals and parameters restored from obfuscated source; the
    function name comes from its call site in ``preprocess`` below.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""Image processor that resizes document images, flips RGB to BGR, and
    optionally runs Tesseract OCR to extract words plus normalized boxes.

    NOTE(review): class name and all ``self.<attr>`` / local bindings restored
    from obfuscated source; attribute names follow the reads in ``preprocess``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size['height'], size['width'])
        # Module-level `resize` from ...image_transforms does the actual work.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more images; returns a BatchFeature with pixel_values
        (and, when OCR is enabled, per-image ``words`` and ``boxes``)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 534
| 1
|
'''simple docstring'''
# Package version string.  NOTE(review): obfuscated name — in upstream
# accelerate this constant is exposed as ``__version__``; confirm before
# renaming, since external code may import it.
lowerCAmelCase = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 525
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")

# Module-level logger; ``main`` below reads this name (the obfuscated source
# bound it to a throwaway identifier, leaving ``logger`` undefined).
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    NOTE(review): field names and defaults restored from obfuscated source in
    which every field shared one name and every default was the undefined
    ``_A``; names follow the help strings and the ``data_args.*`` reads in
    ``main``.  Defaults marked below are assumptions to confirm upstream.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,  # NOTE(review): assumed True as in the upstream example script
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    NOTE(review): field names restored from the help strings and the
    ``model_args.*`` reads in ``main``; the obfuscated source collapsed every
    field to one name and every default to the undefined ``_A``.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    """Fine-tune / evaluate a sequence-classification model on XNLI.

    NOTE(review): every local binding in the obfuscated source was collapsed
    to one name (including a syntactically invalid annotated tuple unpack);
    locals are restored from their later reads.  The name ``main`` comes from
    the ``__main__`` guard below.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 525
| 1
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for zero-shot image classification.

    Covers the tiny random CLIP checkpoint (fast) and `openai/clip-vit-base-patch32`
    (slow), on both the PyTorch and TensorFlow backends. Each method has a unique
    `test_*` name so unittest discovers all of them.
    """

    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation
        # and the order is not guaranteed across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # Five inputs, three near-equal scores each; label order is unstable so only
        # check shape and score values.
        self.assertEqual(
            nested_simplify(output),
            [[{"score": 0.333, "label": ANY(str)} for _ in range(3)] for _ in range(5)],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [[{"score": 0.333, "label": ANY(str)} for _ in range(3)] for _ in range(5)],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 49
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (timm_key, hf_key) pairs mapping timm ViT-hybrid weights to HF names.

    Args:
        config: ViTHybrid configuration; only `backbone_config.depths` and
            `num_hidden_layers` are read here.
        base_model: if True, emit names for a bare `ViTHybridModel`
            (no "vit." prefix, pooler instead of classification head).

    Returns:
        list of (old_name, new_name) tuples.
    """
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        # downsampling is only present in the first block of each stage
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    # fmt: on

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops `blocks.{i}.attn.qkv.{weight,bias}` and
    inserts per-head-matrix keys. `config.hidden_size` fixes the row split
    (rows [0:h] = query, [h:2h] = key, [-h:] = value).
    """
    for i in range(config.num_hidden_layers):
        # A bare base model has no "vit." prefix on its keys.
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head entries from `state_dict` in place.

    Uses `pop(key, None)` so a checkpoint without a head is handled silently.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO validation image (two cats) used to verify conversions.

    Performs a network request; returns a PIL image.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-hybrid checkpoint into the HF ViTHybrid format.

    Args:
        vit_name: name of the timm model to convert (e.g. "vit_base_r50_s16_384").
        pytorch_dump_folder_path: where to save the converted model/processor, or None.
        push_to_hub: if True, also push model and processor to the HF hub.
    """
    # define default ViT hybrid configuration (BiT R50 backbone + ViT, 384px, ImageNet-1k head)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # label mapping from the hub dataset
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor matching timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    # CLI: convert a timm ViT-hybrid checkpoint and optionally push it to the hub.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 1
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV-4 checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden (embedding) dimension for each published RWKV-4 checkpoint size.
# NOTE: name kept as used by the conversion function below.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1_024,
    "1B5": 2_048,
    "3B": 2_560,
    "7B": 4_096,
    "14B": 5_120,
}
def convert_state_dict(state_dict):
    """Rename the keys of an original RWKV state dict to the HF `RwkvModel` layout.

    Mutates and returns `state_dict`. Everything except the LM head gets the
    "rwkv." prefix expected by `RwkvForCausalLM`.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        # every weight except the LM head lives under the "rwkv" submodule
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the hub and convert it to HF format.

    Args:
        repo_id: hub repo holding the original checkpoint.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: destination directory for config/tokenizer/sharded weights.
        size: one of NUM_HIDDEN_LAYERS_MAPPING keys; inferred from the
            checkpoint name when omitted.
        tokenizer_file: optional tokenizer.json; defaults to the GPT-NeoX tokenizer.
        push_to_hub: if True, upload the converted model under `model_name`.
        model_name: hub repo name used when pushing.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        # Re-save each shard with CPU-cloned tensors to shrink file size.
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI wrapper around convert_rmkv_checkpoint_to_hf_format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 70
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import scaffolding for the Ernie model: heavy submodules are only
# imported when one of their attributes is first accessed.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration objects are exposed.
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358
| 0
|
"""simple docstring"""
import baseaa
def UpperCAmelCase(snake_case: str):
    """Encode the given text to Ascii85 bytes via UTF-8.

    NOTE(review): `baseaa.aaaencode` looks like an obfuscated `base64.a85encode`
    and the `import baseaa` at the top of the file an obfuscated `import base64`
    — confirm and restore the real module name.
    """
    # Fix: encode the *parameter*, not an undefined global `string`.
    return baseaa.aaaencode(snake_case.encode("utf-8"))
def UpperCAmelCase(snake_case: bytes):
    """Decode Ascii85 bytes back to a UTF-8 string."""
    decoded_bytes = baseaa.aaadecode(snake_case)
    return decoded_bytes.decode("utf-8")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 716
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCamelCase__ = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a (train, valid) pair of DataLoaders over the noisy line y = a*x + b.

    Each dataset holds `batch_size * n_batches` samples of shape (1,).
    """

    def get_dataset(n_batches):
        # x ~ N(0, 1); targets follow the line plus small Gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run `num_epochs` of MSE training steps; return the random draws made per batch.

    The returned list of `random.random()` values lets callers compare RNG state
    across save/load cycles.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Tiny model computing y = a * x + b with scalar learnable parameters."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        # `forward` (not an arbitrary method name) so `model(x)` works via nn.Module.__call__.
        return x * self.a + self.b
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:List[str] = DummyModel()
_lowerCAmelCase:Any = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = dummy_dataloaders()
_lowerCAmelCase:List[str] = ProjectConfiguration(total_limit=1 ,project_dir=a__ ,automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:List[Any] = Accelerator(project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:int = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir)) ,1)
def __UpperCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:int = DummyModel()
_lowerCAmelCase:Dict = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Dict = dummy_dataloaders()
# Train baseline
_lowerCAmelCase:List[str] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Optional[Any] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
_lowerCAmelCase:Union[str, Any] = os.path.join(a__ ,'''initial''')
accelerator.save_state(a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Any = model.a.item(), model.b.item()
_lowerCAmelCase:Optional[int] = optimizer.state_dict()
_lowerCAmelCase:Tuple = train(3 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Optional[int] = model.a.item(), model.b.item()
_lowerCAmelCase:Any = optimizer.state_dict()
# Train partially
set_seed(42)
_lowerCAmelCase:str = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Any = dummy_dataloaders()
_lowerCAmelCase:Union[str, Any] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Tuple = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
accelerator.load_state(a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):str = model.a.item(), model.b.item()
_lowerCAmelCase:Any = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
_lowerCAmelCase:str = train(2 ,a__ ,a__ ,a__ ,a__)
# Save everything
_lowerCAmelCase:Optional[Any] = os.path.join(a__ ,'''checkpoint''')
accelerator.save_state(a__)
# Load everything back in and make sure all states work
accelerator.load_state(a__)
test_rands += train(1 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):List[Any] = model.a.item(), model.b.item()
_lowerCAmelCase:List[Any] = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
def __UpperCamelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:List[str] = DummyModel()
_lowerCAmelCase:int = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Tuple = dummy_dataloaders()
_lowerCAmelCase:Any = ProjectConfiguration(automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:str = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:List[Any] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
((_lowerCAmelCase) , (_lowerCAmelCase)):List[str] = model.a.item(), model.b.item()
_lowerCAmelCase:str = optimizer.state_dict()
_lowerCAmelCase:Optional[int] = train(3 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):int = model.a.item(), model.b.item()
_lowerCAmelCase:int = optimizer.state_dict()
# Train partially
set_seed(42)
_lowerCAmelCase:Optional[int] = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = dummy_dataloaders()
_lowerCAmelCase:Union[str, Any] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=a__)
_lowerCAmelCase:Dict = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Optional[int] = accelerator.prepare(
a__ ,a__ ,a__ ,a__)
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0'''))
((_lowerCAmelCase) , (_lowerCAmelCase)):Dict = model.a.item(), model.b.item()
_lowerCAmelCase:int = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
_lowerCAmelCase:Dict = train(2 ,a__ ,a__ ,a__ ,a__)
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_1'''))
test_rands += train(1 ,a__ ,a__ ,a__ ,a__)
((_lowerCAmelCase) , (_lowerCAmelCase)):Dict = model.a.item(), model.b.item()
_lowerCAmelCase:Union[str, Any] = optimizer.state_dict()
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
self.assertEqual(a__ ,a__)
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:str = torch.tensor([1, 2, 3])
_lowerCAmelCase:Tuple = torch.tensor([2, 3, 4])
_lowerCAmelCase:Tuple = DummyModel()
_lowerCAmelCase:Union[str, Any] = torch.optim.Adam(net.parameters())
_lowerCAmelCase:Union[str, Any] = Accelerator()
with self.assertRaises(a__) as ve:
accelerator.register_for_checkpointing(a__ ,a__ ,a__ ,a__)
_lowerCAmelCase:List[Any] = str(ve.exception)
self.assertTrue('''Item at index 0''' in message)
self.assertTrue('''Item at index 1''' in message)
self.assertFalse('''Item at index 2''' in message)
self.assertFalse('''Item at index 3''' in message)
def __UpperCamelCase ( self : str) -> Optional[Any]:
"""Checks that an LR scheduler's state is saved by `save_state` and restored by
`load_state`: after training the state differs from the initial snapshot, and
after loading checkpoint_0 it matches the snapshot again.

NOTE(review): `a__` placeholders and `_lowerCAmelCase` targets are obfuscation
residue — the real argument/variable names must be restored before this runs.
"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:int = DummyModel()
_lowerCAmelCase:str = torch.optim.Adam(params=model.parameters() ,lr=1E-3)
_lowerCAmelCase:Union[str, Any] = torch.optim.lr_scheduler.StepLR(a__ ,step_size=1 ,gamma=0.99)
_lowerCAmelCase , _lowerCAmelCase:List[str] = dummy_dataloaders()
_lowerCAmelCase:Any = ProjectConfiguration(automatic_checkpoint_naming=a__)
# Train baseline
_lowerCAmelCase:Optional[int] = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:int = accelerator.prepare(
a__ ,a__ ,a__ ,a__ ,a__)
# Save initial
accelerator.save_state()
# Snapshot the scheduler state before any training steps.
_lowerCAmelCase:Dict = scheduler.state_dict()
train(3 ,a__ ,a__ ,a__ ,a__ ,a__)
# Training must have advanced the scheduler.
self.assertNotEqual(a__ ,scheduler.state_dict())
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0'''))
# Restored scheduler state must equal the pre-training snapshot.
self.assertEqual(a__ ,scheduler.state_dict())
def __UpperCamelCase ( self : Union[str, Any]) -> Tuple:
"""With `total_limit=2`, saving eleven checkpoints must rotate old ones away:
checkpoint_0 is deleted while the two most recent (checkpoint_9, checkpoint_10)
remain on disk.

NOTE(review): `a__` placeholders are obfuscation residue (tmpdir, True, model, ...).
"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42)
_lowerCAmelCase:str = DummyModel()
# Keep at most 2 checkpoints on disk.
_lowerCAmelCase:Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=a__ ,total_limit=2)
# Train baseline
_lowerCAmelCase:List[str] = Accelerator(project_dir=a__ ,project_config=a__)
_lowerCAmelCase:List[str] = accelerator.prepare(a__)
# Save 3 states:
for _ in range(11):
accelerator.save_state()
# Oldest rotated out; the last two survive.
self.assertTrue(not os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_0''')))
self.assertTrue(os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_9''')))
self.assertTrue(os.path.exists(os.path.join(a__ ,'''checkpoints''' ,'''checkpoint_10''')))
@require_cuda
def __UpperCamelCase ( self : int) -> str:
"""Re-runs this very test file under `torchrun` with one process per visible GPU,
exercising the `if __name__ == "__main__"` multi-process checkpointing path below.
"""
# inspect.getfile(self.__class__) resolves to this source file.
_lowerCAmelCase:Union[str, Any] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
execute_subprocess_async(a__ ,env=os.environ.copy())
# Multi-process entry point (launched via torchrun by the @require_cuda test above):
# verifies optimizer state placement across save_state/load_state with the three
# supported `map_location` values: default (GPU), "cpu", "on_device", and that an
# invalid value raises TypeError.
# NOTE(review): indentation was lost in this rendering and the `UpperCamelCase__`
# targets shadow each other — the real names (savedir, model, optimizer, ...) are
# visible at the use-sites and must be restored before execution.
if __name__ == "__main__":
UpperCamelCase__ = '''/tmp/accelerate/state_checkpointing'''
UpperCamelCase__ = DummyModel()
UpperCamelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCamelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCamelCase__ , UpperCamelCase__ = dummy_dataloaders()
UpperCamelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
# Only the main process prepares a clean checkpoint directory.
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase__ , UpperCamelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
UpperCamelCase__ = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 439
| 0
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase : str = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for the HANS dataset.

    Attributes:
        guid: Unique id for the example.
        text_a: The untokenized premise text.
        text_b: The untokenized hypothesis text.
        label: The label of the example (train/dev only).
        pairID: The HANS id of the example, used to write out predictions.
    """

    # Field names are grounded by the construction site below:
    # InputExample(guid=..., text_a=..., text_b=..., label=..., pairID=...).
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features, produced from an ``InputExample`` by the tokenizer.

    Attributes:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid attention on padding tokens (1 = real, 0 = padding).
        token_type_ids: Segment indices distinguishing the two input portions.
        label: Label of the example, converted to an int id.
        pairID: The HANS id of the example.
    """

    # Field names are grounded by the construction site below
    # (InputFeatures(**tokenizer_output, label=..., pairID=...)) and the
    # tokenizer-output keys used by the TF generator in this file.
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
# PyTorch Dataset for HANS: tokenizes the examples once and caches the resulting
# features to disk (guarded by a FileLock so only one distributed process builds them).
# NOTE(review): heavy obfuscation damage — the base class placeholder `UpperCAmelCase_`
# (presumably torch.utils.data.Dataset), parameter names, and the `_snake_case` /
# `lowerCamelCase__` placeholders do not match the real names used at the use-sites
# (task, data_dir, tokenizer, max_seq_length, evaluate, overwrite_cache,
# cached_features_file, self.features, self.label_list). Restore before running.
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = 42
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Tuple=False , UpperCAmelCase : bool = False , ) -> Tuple:
# Resolve the processor for the requested task ("hans").
lowerCamelCase__ : Optional[int] = hans_processors[task]()
# Cache file name encodes split, tokenizer class, max length and task.
lowerCamelCase__ : int = os.path.join(
_snake_case , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_snake_case ) , _snake_case , ) , )
lowerCamelCase__ : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : int = label_list[2], label_list[1]
lowerCamelCase__ : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : Optional[Any] = cached_features_file + '.lock'
with FileLock(_snake_case ):
if os.path.exists(_snake_case ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowerCamelCase__ : str = torch.load(_snake_case )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowerCamelCase__ : Tuple = (
processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
)
logger.info('Training examples: %s' , len(_snake_case ) )
lowerCamelCase__ : List[Any] = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
logger.info('Saving features into cached file %s' , _snake_case )
torch.save(self.features , _snake_case )
def __len__( self : Dict ) -> Tuple:
# Number of tokenized examples.
return len(self.features )
def __getitem__( self : Any , UpperCAmelCase : Dict ) -> Tuple:
# NOTE(review): body indexes `i` but the parameter placeholder differs — restore name.
return self.features[i]
def A_ ( self : str ) -> str:
# Accessor for the (possibly swapped) label list.
return self.label_list
if is_tf_available():
import tensorflow as tf
# TensorFlow variant of the HANS dataset: eagerly tokenizes all examples and exposes
# them as a tf.data.Dataset built from a Python generator.
# NOTE(review): obfuscation damage as above — parameter names and `_snake_case`
# placeholders do not match the use-sites (task, tokenizer, data_dir, max_seq_length,
# evaluate); `tf.intaa` is a mangled `tf.int32`/`tf.int64`. Restore before running.
class lowerCAmelCase :
UpperCAmelCase__ = 42
def __init__( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : List[Any]=False , UpperCAmelCase : bool = False , ) -> int:
lowerCamelCase__ : List[Any] = hans_processors[task]()
lowerCamelCase__ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : Any = label_list[2], label_list[1]
lowerCamelCase__ : Dict = label_list
lowerCamelCase__ : Union[str, Any] = processor.get_dev_examples(_snake_case ) if evaluate else processor.get_train_examples(_snake_case )
lowerCamelCase__ : Optional[int] = hans_convert_examples_to_features(_snake_case , _snake_case , _snake_case , _snake_case )
# Generator yielding (inputs_dict, label) pairs for tf.data.
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_snake_case )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
# Output signature: int tensors for ids/masks, scalar int label.
lowerCamelCase__ : Any = tf.data.Dataset.from_generator(
_snake_case , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : Tuple ) -> Optional[int]:
# Accessor for the built tf.data.Dataset.
return self.dataset
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : str , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
# NOTE(review): body indexes `i` but the parameter placeholder differs — restore name.
return self.features[i]
def A_ ( self : Any ) -> Any:
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set (TSV files with a header row).

    Defined under this name because the ``hans_processors`` registry at the bottom
    of the file references ``HansProcessor`` directly; the obfuscated placeholder
    name made that a NameError, and the ``_snake_case`` references in the method
    bodies were unbound.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """Returns the list of labels for this data set."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Column layout of the HANS TSV (per the indices used below):
        0 = gold label, 5 = premise, 6 = hypothesis, 7 = example id
        (optionally prefixed with "ex").
        """
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # Skip the header row.
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Strip the "ex" prefix from the pair id when present.
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """
    Loads a list of ``InputExample`` into a list of ``InputFeatures``.

    Defined under this name because both dataset classes above call
    ``hans_convert_examples_to_features(...)``; the obfuscated definition used a
    different name and referenced unbound ``__UpperCamelCase`` placeholders.

    Args:
        examples: List of ``InputExample`` containing the examples.
        label_list: List of labels (e.g. from ``processor.get_labels()``).
        max_length: Maximum example length after tokenization.
        tokenizer: Tokenizer instance used to encode the examples.

    Returns:
        A list of ``InputFeatures`` that can be fed to the model.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        # Examples whose label is not in the map fall back to label id 0.
        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    # Log the first few converted examples for debugging.
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
# Number of labels per task.
hans_tasks_num_labels = {
    "hans": 3,
}

# Registry mapping a task name to its processor class. Must be named
# `hans_processors`: both dataset classes above look it up via
# `hans_processors[task]()`, which the obfuscated `_UpperCAmelCase`
# binding turned into a NameError.
hans_processors = {
    "hans": HansProcessor,
}
| 295
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds tiny ``BlipTextConfig``s and random inputs for ``TFBlipTextModel`` tests.

    Defined under this name because the test class below instantiates
    ``BlipTextModelTester(self)``; the obfuscated version also gave every
    parameter the same placeholder name (a SyntaxError). Parameter and
    attribute names are recovered from the preserved right-hand sides of the
    original assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Returns (config, input_ids, attention_mask) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Ensure each row attends to a non-empty prefix and masks the rest.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Builds a tiny ``BlipTextConfig`` from the tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Runs the model with and without the attention mask and checks output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        # Shape expected by the common TF model tester mixin.
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
# Test suite for TFBlipTextModel built on the common TF model tester.
# NOTE(review): obfuscation damage — the base-class placeholder `UpperCAmelCase_`
# (presumably TFModelTesterMixin, imported above) is undefined; the class-level
# `A__` attributes lost their real names (the tuple is `all_model_classes`; the
# three booleans' names cannot be recovered from this view); every method is
# named `_a` so later defs shadow earlier ones; `_snake_case` references in
# bodies are unbound. Restore from the original before running.
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""Tests for TFBlipTextModel (configuration and forward-pass shape checks)."""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""setUp: builds the model tester and a ConfigTester for BlipTextConfig."""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""Runs the common configuration tests."""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""Creates a model from random inputs and checks output shapes."""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""Intentionally skipped (training not exercised for this model)."""
pass
def _a ( self : int ):
"""Intentionally skipped."""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""Skipped: Blip does not use inputs_embeds."""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""Skipped: no base class / not in MODEL_MAPPING."""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""Skipped: no base class / not in MODEL_MAPPING."""
pass
@slow
def _a ( self : Union[str, Any] ):
"""Loads the first pretrained checkpoint and checks it instantiates."""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""Delegates PT/TF equivalence to the mixin, allowing missing keys by default."""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9
| 0
|
import collections
import importlib.util
import os
import re
from pathlib import Path
_snake_case : List[Any] = "src/transformers"
# Matches is_xxx_available()
_snake_case : int = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_snake_case : List[Any] = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_snake_case : Optional[Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_snake_case : Tuple = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_snake_case : Tuple = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_snake_case : str = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_snake_case : str = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_snake_case : Any = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_snake_case : int = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_snake_case : str = re.compile(R"^\s*try:")
# Catches a line with else:
_snake_case : Dict = re.compile(R"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init.

    Returns None when the line is not an ``if not is_xxx_available()`` guard;
    otherwise returns the sorted backend names joined with ``_and_``.

    Defined under this name because parse_init() calls ``find_backend`` directly;
    the obfuscated placeholder name made that a NameError.
    """
    if _re_test_backend.search(line) is None:
        return None
    # _re_backend has a second (empty) capture group, so findall returns
    # tuples and b[0] is the backend name.
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Reads an ``__init__.py`` and parses its two halves.

    Returns a pair of dicts mapping backend name (or "none") to the list of
    objects registered under it: first from ``_import_structure``, then from
    the ``TYPE_CHECKING`` imports. Returns None for a traditional init that
    has no ``_import_structure``.

    Defined under this name because check_all_inits() calls ``parse_init``
    directly; the obfuscated version also left most local names unbound.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            # A bare quoted object at 8-space indent: strip quotes and trailing ",\n".
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compares the two halves of a parsed init.

    Takes the backend->objects dicts produced by parse_init() and returns a
    list of human-readable error strings (empty when both halves agree).

    Defined under this name because check_all_inits() calls ``analyze_results``
    directly; the obfuscated placeholder name made that a NameError.
    """

    def find_duplicates(seq):
        # Objects registered more than once in the same backend bucket.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Checks every ``__init__.py`` under PATH_TO_TRANSFORMERS for consistency.

    Raises ValueError listing each file whose ``_import_structure`` and
    TYPE_CHECKING halves disagree. The obfuscated version walked an undefined
    name and its f-string referenced never-bound locals; restored here.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules found on disk.

    Walks PATH_TO_TRANSFORMERS, collecting package folders (skipping private
    ones and empty leftovers) and top-level ``.py`` files. The obfuscated
    version referenced undefined placeholder names throughout; restored here.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules (no dots after the path->module mapping).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in the main init.
# Must be named IGNORE_SUBMODULES: check_submodules() below references it,
# and the obfuscated `_snake_case` binding made that a NameError.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Checks every on-disk submodule is registered in the main init's _import_structure.

    Raises ValueError listing unregistered submodules. The obfuscated version
    referenced undefined placeholder names; restored here.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
# Entry point: validate every __init__.py and the submodule registration.
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 203
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Tuple=13 , lowerCamelCase : Tuple=32 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : Tuple=16 , lowerCamelCase : Any=[32, 64, 128] , lowerCamelCase : str=[1, 2, 1] , lowerCamelCase : Union[str, Any]=[2, 2, 4] , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Optional[int]=2.0 , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : List[str]=0.0 , lowerCamelCase : Dict=0.1 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : Any=False , lowerCamelCase : Any=True , lowerCamelCase : str=0.02 , lowerCamelCase : Tuple=1E-5 , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[str]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : Any=8 , lowerCamelCase : List[str]=["stage1", "stage2"] , lowerCamelCase : Optional[int]=[1, 2] , ) -> Optional[int]:
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : Optional[int] = image_size
__snake_case : int = patch_size
__snake_case : Any = num_channels
__snake_case : List[Any] = embed_dim
__snake_case : str = hidden_sizes
__snake_case : int = depths
__snake_case : Any = num_heads
__snake_case : Any = window_size
__snake_case : Optional[Any] = mlp_ratio
__snake_case : Any = qkv_bias
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Tuple = drop_path_rate
__snake_case : Tuple = hidden_act
__snake_case : Optional[int] = use_absolute_embeddings
__snake_case : Optional[int] = patch_norm
__snake_case : int = layer_norm_eps
__snake_case : Any = initializer_range
__snake_case : Any = is_training
__snake_case : Tuple = scope
__snake_case : Tuple = use_labels
__snake_case : Any = type_sequence_label_size
__snake_case : Dict = encoder_stride
__snake_case : Union[str, Any] = out_features
__snake_case : Union[str, Any] = out_indices
def prepare_config_and_inputs(self):
    """Returns (config, pixel_values, labels); labels is None unless self.use_labels.

    Renamed from the obfuscated placeholder: every method in this tester shared
    one name, so later defs shadowed earlier ones. The conventional tester name
    is used here (TODO confirm against the call sites outside this view).
    """
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

    config = self.get_config()

    return config, pixel_values, labels
def get_config(self):
    """Builds a small FocalNetConfig from the tester's hyper-parameters.

    Must be named ``get_config``: the sibling prepare_config_and_inputs()
    calls ``self.get_config()``.
    """
    return FocalNetConfig(
        image_size=self.image_size,
        patch_size=self.patch_size,
        num_channels=self.num_channels,
        embed_dim=self.embed_dim,
        hidden_sizes=self.hidden_sizes,
        depths=self.depths,
        num_heads=self.num_heads,
        window_size=self.window_size,
        mlp_ratio=self.mlp_ratio,
        qkv_bias=self.qkv_bias,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        drop_path_rate=self.drop_path_rate,
        hidden_act=self.hidden_act,
        use_absolute_embeddings=self.use_absolute_embeddings,
        # NOTE(review): kwarg is "path_norm" (sic) in the original call —
        # confirm against FocalNetConfig before "fixing" the spelling.
        path_norm=self.patch_norm,
        layer_norm_eps=self.layer_norm_eps,
        initializer_range=self.initializer_range,
        encoder_stride=self.encoder_stride,
        out_features=self.out_features,
        out_indices=self.out_indices,
    )
def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : str ) -> str:
__snake_case : Tuple = FocalNetModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
__snake_case : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __snake_case(self, config, pixel_values, labels):
    """Check FocalNetBackbone feature maps and channels, with and without out_features.

    Fix: the original declared duplicate `lowerCamelCase` parameters
    (a SyntaxError) and assigned everything to the placeholder `__snake_case`.
    """
    model = FocalNetBackbone(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    # verify feature maps
    self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
    self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
    # verify channels
    self.parent.assertEqual(len(model.channels), len(config.out_features))
    self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
    # verify backbone works with out_features=None (defaults to last stage only)
    config.out_features = None
    model = FocalNetBackbone(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    # verify feature maps
    self.parent.assertEqual(len(result.feature_maps), 1)
    self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
    # verify channels
    self.parent.assertEqual(len(model.channels), 1)
    self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def __snake_case(self, config, pixel_values, labels):
    """Check the reconstruction shape of FocalNetForMaskedImageModeling (RGB and greyscale).

    Fix: duplicate `lowerCamelCase` parameters (a SyntaxError) and
    placeholder `__snake_case` assignments in the original.
    """
    model = FocalNetForMaskedImageModeling(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    self.parent.assertEqual(
        result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
    # test greyscale images
    config.num_channels = 1
    model = FocalNetForMaskedImageModeling(config)
    model.to(torch_device)
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
def __snake_case(self, config, pixel_values, labels):
    """Check the logits shape of FocalNetForImageClassification (RGB and greyscale).

    Fix: duplicate `lowerCamelCase` parameters (a SyntaxError) and
    placeholder `__snake_case` assignments in the original.
    """
    config.num_labels = self.type_sequence_label_size
    model = FocalNetForImageClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    # test greyscale images
    config.num_channels = 1
    model = FocalNetForImageClassification(config)
    model.to(torch_device)
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def __snake_case(self):
    """Return (config, inputs_dict) as expected by the common ModelTesterMixin checks.

    Fix: the original unpacked into three `__snake_case` placeholders and
    then referenced the undefined `pixel_values` (NameError).
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Model-level test suite for FocalNet (model / classification / masked-image / backbone).

    NOTE(review): identifier mangling left every method named `__snake_case`
    (later defs shadow earlier ones), every local bound to the placeholder
    `__snake_case`, and several methods with duplicate `lowerCamelCase`
    parameters (a SyntaxError) — the original identifiers were lost and this
    block is documented as-is, not repaired.
    """

    # All FocalNet classes under test; the backbone is last, and several
    # generic checks below exclude it via `self.all_model_classes[:-1]`.
    __UpperCAmelCase : int = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task mapping used by the pipeline tester mixin.
    __UpperCAmelCase : Optional[int] = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # Generic-test switches (all disabled for FocalNet).
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : str = False
    __UpperCAmelCase : List[Any] = False
    __UpperCAmelCase : Optional[int] = False
    __UpperCAmelCase : List[str] = False

    def __snake_case ( self : List[str] ) -> Tuple:
        # setUp: build the shared model tester and a ConfigTester for FocalNetConfig.
        __snake_case : Optional[Any] = FocalNetModelTester(self )
        __snake_case : int = ConfigTester(self , config_class=lowerCamelCase , embed_dim=37 , has_text_modality=lowerCamelCase )

    def __snake_case ( self : Optional[int] ) -> int:
        # Run the standard battery of config serialization/initialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __snake_case ( self : str ) -> Tuple:
        # Intentionally empty (placeholder for a common-properties hook).
        return

    def __snake_case ( self : Union[str, Any] ) -> str:
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    def __snake_case ( self : str ) -> int:
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase )

    def __snake_case ( self : List[str] ) -> Optional[int]:
        __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )

    def __snake_case ( self : str ) -> List[str]:
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )

    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def __snake_case ( self : Dict ) -> List[str]:
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def __snake_case ( self : str ) -> int:
        pass

    def __snake_case ( self : Optional[int] ) -> Tuple:
        # Input embeddings must be an nn.Module; output embeddings None or nn.Linear.
        __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __snake_case : Dict = model_class(lowerCamelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )

    def __snake_case ( self : List[str] ) -> Optional[int]:
        # The first forward() argument must be named "pixel_values".
        __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __snake_case : List[str] = model_class(lowerCamelCase )
            __snake_case : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[Any] = [*signature.parameters.keys()]
            __snake_case : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )

    def __snake_case ( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] ) -> str:
        # check_hidden_states_output: verify count and shapes of (reshaped) hidden states.
        __snake_case : Tuple = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        with torch.no_grad():
            __snake_case : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
        __snake_case : List[str] = outputs.hidden_states
        __snake_case : Tuple = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
        # FocalNet has a different seq_length
        __snake_case : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __snake_case : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __snake_case : List[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
        __snake_case , __snake_case , __snake_case , __snake_case : Tuple = reshaped_hidden_states[0].shape
        __snake_case : Optional[int] = (
            reshaped_hidden_states[0].view(lowerCamelCase , lowerCamelCase , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def __snake_case ( self : Tuple ) -> int:
        # output_hidden_states via kwarg and via config must agree.
        __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Dict = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            __snake_case : Optional[Any] = True
            self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : Dict = True
            self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )

    def __snake_case ( self : Optional[int] ) -> Any:
        # Same check with an image size that is not a multiple of the patch size.
        __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Optional[int] = 3
        __snake_case : List[str] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __snake_case : Any = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __snake_case : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __snake_case : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            __snake_case : List[str] = True
            self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : Tuple = True
            self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )

    @slow
    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : List[str] = FocalNetModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )

    def __snake_case ( self : Tuple ) -> List[Any]:
        # With a zero-init config, every trainable non-embedding parameter
        # should have mean exactly 0.0 or 1.0.
        __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Optional[Any] = _config_zero_init(lowerCamelCase )
        for model_class in self.all_model_classes:
            __snake_case : Tuple = model_class(config=lowerCamelCase )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class a (unittest.TestCase ):
    """Slow integration test running the pretrained FocalNet-tiny image classifier.

    NOTE(review): mangled local names were reconstructed; `torch_device` is the
    project-level test device constant used throughout these suites.
    """

    @cached_property
    def __snake_case(self):
        """Default image processor for the checkpoint (None if vision deps are absent)."""
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def __snake_case(self):
        """Check logits shape, a slice of values, and the predicted class on a COCO image."""
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device)
        # NOTE(review): upstream exposes the property above as `default_image_processor`.
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        # Fix: the original used assertTrue(value, 281), which treats 281 as the
        # failure message and always passes for a truthy value; the intended
        # check is equality with class id 281 (tabby cat).
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class a (_lowerCAmelCase , unittest.TestCase ):
    """Backbone-mixin test suite for FocalNetBackbone.

    NOTE(review): this chunk appears truncated right after the first line of
    `setUp`; confirm against the full upstream file before relying on it.
    """

    # Backbone classes under test; empty when torch is unavailable.
    __UpperCAmelCase : Tuple = (FocalNetBackbone,) if is_torch_available() else ()
    # Config class used by the backbone tester mixin.
    __UpperCAmelCase : Dict = FocalNetConfig
    __UpperCAmelCase : Any = False

    def __snake_case ( self : str ) -> List[str]:
        # setUp: shared tester building small configs/inputs for backbone checks.
        __snake_case : Tuple = FocalNetModelTester(self )
| 203
| 1
|
def SCREAMING_SNAKE_CASE(_UpperCAmelCase) -> list:
    """Return all permutations of the input sequence as tuples, via Heap's algorithm.

    Fix: the original body referenced undefined names (`lowerCAmelCase_`,
    `res`, `arr`) and performed the element swap by assigning both values to
    a throwaway local (`_a , _a = ...`), so nothing was ever swapped.

    The input is copied, so the caller's list is not mutated.
    An empty input yields [()] — the single empty permutation.
    """
    arr = list(_UpperCAmelCase)
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k, arr):
        # Recursive Heap's algorithm: emit permutations of the first k elements.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of integers and print all permutations.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    # Fix: the original printed `heaps(arr)` and rebound a single name
    # `lowercase_` twice; no `heaps` exists in this module — the permutation
    # function above is named SCREAMING_SNAKE_CASE.
    print(SCREAMING_SNAKE_CASE(arr))
| 562
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
    """Test suite for the RoCBert tokenizer (vocab, word-shape and pronunciation maps).

    NOTE(review): identifier mangling bound most locals to the placeholder
    `__SCREAMING_SNAKE_CASE` and call arguments to `UpperCAmelCase__`; the
    original identifiers were lost and this block is documented as-is.
    """

    snake_case__ : int = RoCBertTokenizer
    snake_case__ : int = None
    snake_case__ : Optional[Any] = False
    snake_case__ : int = True
    snake_case__ : Any = filter_non_english

    def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
        # setUp: write a tiny vocab plus identity shape/pronunciation JSON maps
        # into the temp dir so the tokenizer can be constructed from files.
        super().setUp()
        __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        __SCREAMING_SNAKE_CASE = {}
        __SCREAMING_SNAKE_CASE = {}
        for i, value in enumerate(UpperCAmelCase__ ):
            __SCREAMING_SNAKE_CASE = i
            __SCREAMING_SNAKE_CASE = i
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
            json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
        with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
            json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )

    def UpperCAmelCase_ ( self : Any ) -> List[str]:
        # Tokenization plus all three id-conversion channels on a Chinese sample.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )

    def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
        # Default basic tokenizer keeps CJK characters as separate tokens.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        # Lower-casing with default accent handling.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        # Lower-casing with accents preserved.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def UpperCAmelCase_ ( self : int ) -> Dict:
        # Lower-casing with accents stripped.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
        # Lower-casing default behaviour on accented input.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
        # No lower-casing: original case preserved.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        # No lower-casing, accents preserved.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def UpperCAmelCase_ ( self : int ) -> List[Any]:
        # No lower-casing, accents stripped.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
        # never_split keeps the listed special tokens intact.
        __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def UpperCAmelCase_ ( self : str ) -> List[str]:
        # Wordpiece tokenizer: greedy longest-match-first with "##" continuations.
        __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        __SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(UpperCAmelCase__ ):
            __SCREAMING_SNAKE_CASE = i
        __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        # Character classification helper: whitespace.
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        # Character classification helper: control characters.
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
        # Character classification helper: punctuation.
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def UpperCAmelCase_ ( self : int ) -> int:
        # Soft-hyphen (\xad) should tokenize to an empty list, not [UNK].
        __SCREAMING_SNAKE_CASE = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
        # Offset mapping must line up with the emitted tokens, in both casings.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
                    UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
                __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False
                __SCREAMING_SNAKE_CASE = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "Allen"),
                        ((2_1, 2_3), "##NL"),
                        ((2_3, 2_4), "##P"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), "allen"),
                        ((2_1, 2_3), "##nl"),
                        ((2_3, 2_4), "##p"),
                        ((2_5, 3_3), "sentence"),
                        ((3_3, 3_4), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        # Chinese characters: "##" prefixing depends on the chinese-ref mode.
        __SCREAMING_SNAKE_CASE = ["的", "人", "有"]
        __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __SCREAMING_SNAKE_CASE = True
                __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = False
                __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                __SCREAMING_SNAKE_CASE = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
                ]
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
        # build_inputs_with_special_tokens wraps with [CLS]=1 and [SEP]=2.
        __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        # prepare_for_model (with all three id channels) must equal encode_plus.
        __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                __SCREAMING_SNAKE_CASE = "你好,你是谁"
                __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model(
                    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 682
| 0
|
from __future__ import annotations
from statistics import mean
def lowerCAmelCase_(arrival_time, burst_time, no_of_processes) -> list[int]:
    """Compute per-process waiting times for non-preemptive shortest-job-first.

    Fix: the original declared three parameters all named `_SCREAMING_SNAKE_CASE`
    (a SyntaxError) while the body referenced `arrival_time`, `burst_time` and
    `no_of_processes`; the signature is restored to match the body.

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst (execution) time of each process.
        no_of_processes: number of processes.

    Returns:
        waiting_time[i] = completion start delay for process i, i.e.
        finish_time - arrival_time[i] - burst_time[i].
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # Repeatedly pick the shortest arrived, unfinished job and run it to
    # completion; when nothing has arrived yet, advance the clock by 1.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def lowerCAmelCase_(burst_time, no_of_processes, waiting_time) -> list[int]:
    """Compute per-process turnaround times: burst_time[i] + waiting_time[i].

    Fix: the original declared three parameters all named `_SCREAMING_SNAKE_CASE`
    (a SyntaxError) while the body referenced `burst_time`, `waiting_time` and
    `no_of_processes`; parameter order follows the demo call site
    (burst_time, no_of_processes, waiting_time).
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    # Demo run: four processes, all arriving at t=0.
    print('[TEST CASE 01]')
    # NOTE(review): identifier mangling collapsed all locals into
    # `__lowerCamelCase`, and the two helpers in this module are both named
    # `lowerCAmelCase_`, so `calculate_waitingtime` / `calculate_turnaroundtime`
    # below are undefined here — confirm against the original script.
    __lowerCamelCase = 4
    __lowerCamelCase = [2, 5, 3, 7]
    __lowerCamelCase = [0, 0, 0, 0]
    __lowerCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __lowerCamelCase = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 712
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
# Module-level logger from the project's logging utility, keyed by module path.
__lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase( SCREAMING_SNAKE_CASE ):
    """Deprecated alias of DeiTImageProcessor kept for backward compatibility.

    Emits a FutureWarning on construction and otherwise behaves exactly like
    its parent image-processor class.
    """

    def __init__(self, *args, **kwargs):
        # Fix: the original declared `*_lowerCamelCase, **_lowerCamelCase`
        # (duplicate argument name — a SyntaxError) and passed the positional
        # args tuple where warnings.warn expects a warning category.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 328
| 0
|
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _UpperCAmelCase ( UpperCamelCase__ ):
    """Output container for a DDIM scheduler step.

    NOTE(review): identifier mangling gave both fields the same name
    `__lowerCamelCase`, so the second declaration shadows the first; per the
    Copied-from note these should be `prev_sample` and `pred_original_sample`
    — confirm against the upstream class.
    """

    # Sample for the previous timestep (the step output).
    __lowerCamelCase: torch.FloatTensor
    # Optional predicted denoised sample (x0 estimate); may be None.
    __lowerCamelCase: Optional[torch.FloatTensor] = None
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowercase_ : Union[str, Any] = []
for i in range(__UpperCamelCase ):
lowercase_ : str = i / num_diffusion_timesteps
lowercase_ : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) , __UpperCamelCase ) )
return torch.tensor(__UpperCamelCase , dtype=torch.floataa )
class _UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
# NOTE(review): name mangled — presumably the scheduler's `order` attribute
# (upstream DDIM-style schedulers set `order = 1`); confirm before relying on it.
__lowerCamelCase: str = 1
@register_to_config
def __init__(
    self,
    num_train_timesteps: int = 1000,
    beta_start: float = 0.0001,
    beta_end: float = 0.02,
    beta_schedule: str = "linear",
    trained_betas=None,
    clip_sample: bool = True,
    set_alpha_to_zero: bool = True,
    steps_offset: int = 0,
    prediction_type: str = "epsilon",
    clip_sample_range: float = 1.0,
    **kwargs,
):
    """Build the beta/alpha schedules for the inverted-DDIM scheduler.

    Fix: the original declared every parameter with the single name `a`
    (a SyntaxError), bound every result to throwaway locals so
    `self.betas` etc. were never set, and used the nonexistent dtypes
    `torch.floataa` / `np.intaa`. Parameter names follow the body's usage
    (kwargs, trained_betas, beta_schedule, beta_start, beta_end,
    set_alpha_to_zero).
    """
    # Backward-compat shim for the renamed `set_alpha_to_one` argument.
    if kwargs.get("set_alpha_to_one", None) is not None:
        deprecation_message = (
            "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
        )
        deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
        set_alpha_to_zero = kwargs["set_alpha_to_one"]
    if trained_betas is not None:
        self.betas = torch.tensor(trained_betas, dtype=torch.float32)
    elif beta_schedule == "linear":
        self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
    elif beta_schedule == "scaled_linear":
        # this schedule is very specific to the latent diffusion model.
        self.betas = (
            torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        )
    elif beta_schedule == "squaredcos_cap_v2":
        # Glide cosine schedule
        # NOTE(review): the helper is named `__SCREAMING_SNAKE_CASE` in this
        # mangled module; the original text called `betas_for_alpha_bar`.
        self.betas = betas_for_alpha_bar(num_train_timesteps)
    else:
        raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""")
    self.alphas = 1.0 - self.betas
    self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
    # At every step in inverted ddim, we are looking into the next alphas_cumprod
    # For the final step, there is no next alphas_cumprod, and the index is out of bounds
    # `set_alpha_to_zero` decides whether we set this parameter simply to zero
    # in this case, self.step() just output the predicted noise
    # or whether we use the final alpha of the "non-previous" one.
    self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
    # standard deviation of the initial noise distribution
    self.init_noise_sigma = 1.0
    # setable values
    self.num_inference_steps = None
    self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
def lowerCAmelCase__(self, sample, timestep=None):
    """Return the sample unchanged (interface parity with schedulers that scale
    the denoising-model input by timestep; this scheduler needs no scaling).

    Fix: the original declared two parameters with the same name `a`
    (a SyntaxError); the body only returns the sample.
    """
    return sample
    def lowerCAmelCase__ ( self : str , a : Union[str, Any] , a : str = None ):
        """Set the discrete timesteps used for the diffusion chain.

        NOTE(review): the signature repeats the parameter name `a` (a
        SyntaxError) and locals are collapsed onto `lowercase_`, so
        `self.num_inference_steps`, `step_ratio` and `self.timesteps` read
        below are never bound under those names — restore before use.
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
                f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
                f""" maximal {self.config.num_train_timesteps} timesteps.""" )
        lowercase_ : str = num_inference_steps
        lowercase_ : Any = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        lowercase_ : Union[str, Any] = (np.arange(0 , a ) * step_ratio).round().copy().astype(np.intaa )
        lowercase_ : Dict = torch.from_numpy(a ).to(a )
        # Shift all timesteps by the configured offset.
        self.timesteps += self.config.steps_offset
    def lowerCAmelCase__ ( self : int , a : List[str] , a : Union[str, Any] , a : List[str] , a : Optional[Any] = 0.0 , a : str = False , a : int = None , a : Dict = True , ):
        """Perform one inverted-DDIM step: predict x_0 from the model output,
        then advance the sample toward the next (larger) timestep.

        NOTE(review): the signature repeats the parameter name `a` (a
        SyntaxError), and the names read below (`timestep`, `model_output`,
        `sample`, `prev_timestep`, `alpha_prod_t`, `beta_prod_t`,
        `pred_original_sample`, `pred_epsilon`, `alpha_prod_t_prev`,
        `pred_sample_direction`, `prev_sample`) are never bound — locals were
        mangled to `lowercase_`. Restore the original names before use.
        """
        # 1. index of the "previous" (here: next, since the chain is inverted) timestep
        lowercase_ : Tuple = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        lowercase_ : int = self.alphas_cumprod[timestep]
        lowercase_ : List[str] = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        lowercase_ : List[Any] = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            lowercase_ : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            lowercase_ : Union[str, Any] = model_output
        elif self.config.prediction_type == "sample":
            lowercase_ : Tuple = model_output
            lowercase_ : List[str] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            lowercase_ : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            lowercase_ : List[str] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            lowercase_ : Dict = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowercase_ : str = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowercase_ : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=a , pred_original_sample=a )
def __len__( self : List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 620
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# Resource filenames a tokenizer checkpoint directory is expected to contain.
lowercase__ : Tuple = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
# Download URLs for the published facebook/blenderbot-3B checkpoint files.
lowercase__ : Optional[int] = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
# Maximum input length (in tokens) per checkpoint.
lowercase__ : Dict = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
    """Return a mapping from byte values (0-255) to printable unicode characters.

    Byte-level BPE works on bytes, but BPE merge tables are keyed by unicode
    strings, so every byte must map to a *printable* character. Printable
    bytes map to themselves; the remaining (control/whitespace) bytes are
    mapped to characters starting at U+0100 so nothing collides.

    Fix(review): the original body referenced `bs`, `cs` and `n` without ever
    binding them (all assignments had been mangled to the single name `a`),
    so calling it raised NameError. The canonical variable names are restored;
    the returned mapping is unchanged from the reference implementation.
    """
    # Bytes whose unicode codepoint is already printable keep their own codepoint.
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    # Every remaining byte gets the next free codepoint above 255.
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs , cs))
def SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Return the set of adjacent symbol pairs in *__UpperCamelCase*.

    The argument is a sequence of symbols (e.g. a tuple of variable-length
    strings during BPE merging); the result is a set of 2-tuples, one per
    adjacent pair. Assumes a non-empty sequence (indexing `[0]` raises
    IndexError otherwise, as in the reference implementation).

    Fix(review): the original body read `word`, `pairs` and `prev_char`
    without ever binding them (assignments were mangled to the single name
    `a`), so calling it raised NameError. The canonical names are restored;
    behavior matches the reference `get_pairs`.
    """
    pairs = set()
    prev_char = __UpperCamelCase[0]
    for char in __UpperCamelCase[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class a__ ( UpperCamelCase__ ):
    """Byte-level BPE tokenizer (Blenderbot variant).

    NOTE(review): identifiers look machine-mangled throughout this class —
    the base class is named `UpperCamelCase__`, most method signatures repeat
    the parameter name `A` (a SyntaxError), and locals are collapsed onto the
    single name `a`, so attributes read below (`self.encoder`,
    `self.byte_encoder`, `self.bpe_ranks`, `self.cache`, `self.pat`, ...) are
    never actually assigned. Restore the original names before use.
    """
    a : List[str] = VOCAB_FILES_NAMES
    a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a : Union[str, Any] = ["""input_ids""", """attention_mask"""]
    def __init__( self , A , A , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , **A , ) -> int:
        """Wrap special tokens, load vocab.json and merges.txt, build BPE tables."""
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
        super().__init__(
            errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
        with open(A , encoding="utf-8" ) as vocab_handle:
            a = json.load(A )
        a = {v: k for k, v in self.encoder.items()}
        a = errors  # how to handle errors in decoding
        a = bytes_to_unicode()
        a = {v: k for k, v in self.byte_encoder.items()}
        with open(A , encoding="utf-8" ) as merges_handle:
            a = merges_handle.read().split("\n" )[1:-1]
        a = [tuple(merge.split() ) for merge in bpe_merges]
        a = dict(zip(A , range(len(A ) ) ) )
        a = {}
        a = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        a = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def lowerCAmelCase_ ( self ) -> str:
        """Size of the base vocabulary (length of `self.encoder`)."""
        return len(self.encoder )
    def lowerCAmelCase_ ( self ) -> Tuple:
        """Return the base vocab merged with any added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
        """BPE-merge a single token string, using `self.cache` as a memo table.

        NOTE(review): in this mangled copy the cache is read but never written
        (the store at the end was renamed to `a`), and `bigram`/`first`/
        `second` are read without being bound.
        """
        if token in self.cache:
            return self.cache[token]
        a = tuple(A )
        a = get_pairs(A )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            a = min(A , key=lambda A : self.bpe_ranks.get(A , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            a , a = bigram
            a = []
            a = 0
            while i < len(A ):
                try:
                    a = word.index(A , A )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    a = j
                if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            a = tuple(A )
            a = new_word
            if len(A ) == 1:
                break
            else:
                a = get_pairs(A )
        a = " ".join(A )
        a = word
        return word
    def lowerCAmelCase_ ( self , A ) -> Optional[Any]:
        """Split text with the pre-tokenization regex, byte-encode, then BPE each piece."""
        a = []
        for token in re.findall(self.pat , A ):
            a = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(" " ) )
        return bpe_tokens
    def lowerCAmelCase_ ( self , A ) -> str:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(A , self.encoder.get(self.unk_token ) )
    def lowerCAmelCase_ ( self , A ) -> List[str]:
        """Map an id back to its token string."""
        return self.decoder.get(A )
    def lowerCAmelCase_ ( self , A ) -> Dict:
        """Join tokens and decode back through the byte decoder to text."""
        a = "".join(A )
        a = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def lowerCAmelCase_ ( self , A , A = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory* and return their paths."""
        if not os.path.isdir(A ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        a = os.path.join(
            A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        a = os.path.join(
            A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(A , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + "\n" )
        a = 0
        with open(A , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn if ranks are not consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    a = token_index
                writer.write(" ".join(A ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase_ ( self , A , A = None , A = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
        if token_ids_a is None:
            return [1] + ([0] * len(A )) + [1]
        return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
    def lowerCAmelCase_ ( self , A , A = None ) -> List[int]:
        """Return all-zero token type ids (this model does not use token types)."""
        a = [self.sep_token_id]
        a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCAmelCase_ ( self , A , A=False , **A ) -> List[str]:
        """Optionally prepend a space so the first word is tokenized like mid-sentence words."""
        a = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
            a = " " + text
        return (text, kwargs)
    def lowerCAmelCase_ ( self , A , A = None ) -> Any:
        """Append the EOS token id to a single sequence."""
        return token_ids_a + [self.eos_token_id]
    def lowerCAmelCase_ ( self , A ) -> List[int]:
        """Flatten a Conversation into input ids, truncating to `model_max_length`."""
        a = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(A )
        a = "  ".join(A )
        a = self.encode(A )
        if len(A ) > self.model_max_length:
            a = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 515
| 0
|
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    # Entry point: start the permutation state-space search with an empty
    # partial sequence and an all-zero "index used" table.
    # NOTE(review): `create_state_space_tree` and `SCREAMING_SNAKE_CASE__` are
    # not defined in this module — the names look machine-mangled; calling
    # this as written raises NameError. Verify the intended targets.
    create_state_space_tree(SCREAMING_SNAKE_CASE__ , [] , 0 , [0 for i in range(len(SCREAMING_SNAKE_CASE__ ) )] )
def __SCREAMING_SNAKE_CASE ( sequence , current_sequence , index , index_used ):
    """Recursively print every permutation of *sequence* (backtracking search).

    sequence:          the items to permute (never mutated).
    current_sequence:  the partial permutation built so far (mutated in place,
                       restored on backtrack).
    index:             how many positions of the permutation are already fixed.
    index_used:        per-position flags marking which elements are taken.

    Side effect: prints each complete permutation; returns None.

    Fix(review): the original signature declared all four parameters with the
    same name `__UpperCAmelCase` (a SyntaxError), recursed through the
    undefined name `create_state_space_tree`, and its flag updates were
    mangled to `_lowercase = ...` instead of `index_used[i] = ...`. The
    canonical algorithm is restored with self-recursion.
    """
    # Base case: a full permutation has been built.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            __SCREAMING_SNAKE_CASE(sequence, current_sequence, index + 1, index_used)
            # Backtrack: undo the choice before trying the next element.
            current_sequence.pop()
            index_used[i] = False
# Demo driver.
# NOTE(review): `generate_all_permutations`, `sequence` and `sequence_a` are
# not defined in this module (the functions above are named
# `__SCREAMING_SNAKE_CASE`), so importing this file raises NameError here —
# identifiers appear machine-mangled.
UpperCAmelCase: Tuple = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCAmelCase: Optional[int] = ["""A""", """B""", """C"""]
generate_all_permutations(sequence_a)
| 704
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
    """Test-helper that builds small MRA configs and random inputs, and runs
    per-head-model shape checks.

    NOTE(review): locals are collapsed onto `_lowercase`, so the attributes
    read below (`self.batch_size`, `self.seq_length`, ...) are never actually
    assigned in `__init__`, and the tuple-unpacking blocks bind nothing —
    identifiers appear machine-mangled; restore before running.
    """
    def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=2 ,UpperCAmelCase_=8 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=99 ,UpperCAmelCase_=16 ,UpperCAmelCase_=5 ,UpperCAmelCase_=2 ,UpperCAmelCase_=36 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=4 ,UpperCAmelCase_=None ,):
        # Stash the hyper-parameters used to build configs/inputs below.
        _lowercase : Tuple = parent
        _lowercase : Dict = batch_size
        _lowercase : Optional[Any] = seq_length
        _lowercase : Any = is_training
        _lowercase : List[str] = use_input_mask
        _lowercase : Any = use_token_type_ids
        _lowercase : List[str] = use_labels
        _lowercase : Tuple = vocab_size
        _lowercase : Union[str, Any] = hidden_size
        _lowercase : Tuple = num_hidden_layers
        _lowercase : Dict = num_attention_heads
        _lowercase : List[str] = intermediate_size
        _lowercase : Optional[int] = hidden_act
        _lowercase : Any = hidden_dropout_prob
        _lowercase : int = attention_probs_dropout_prob
        _lowercase : List[Any] = max_position_embeddings
        _lowercase : Union[str, Any] = type_vocab_size
        _lowercase : Tuple = type_sequence_label_size
        _lowercase : Union[str, Any] = initializer_range
        _lowercase : Dict = num_labels
        _lowercase : int = num_choices
        _lowercase : int = scope
    def lowerCamelCase__ ( self ):
        """Build random ids/masks/labels plus a config for the common tests."""
        _lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        _lowercase : List[str] = None
        if self.use_input_mask:
            _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Dict = None
        if self.use_token_type_ids:
            _lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        _lowercase : Tuple = None
        _lowercase : Optional[int] = None
        _lowercase : Tuple = None
        if self.use_labels:
            _lowercase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            _lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            _lowercase : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
        _lowercase : List[str] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCamelCase__ ( self ):
        """Build an MraConfig from the tester's hyper-parameters."""
        return MraConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,)
    def lowerCamelCase__ ( self ):
        # Variant config (the 3_00 presumably overrides vocab_size — mangled local).
        _lowercase : Any = self.get_config()
        _lowercase : Union[str, Any] = 3_00
        return config
    def lowerCamelCase__ ( self ):
        """Same as prepare_config_and_inputs plus encoder states for decoder tests."""
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) : List[str] = self.prepare_config_and_inputs()
        _lowercase : int = True
        _lowercase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraModel output shape with/without masks and token types."""
        _lowercase : Union[str, Any] = MraModel(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : str = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
        _lowercase : Union[str, Any] = model(UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
        _lowercase : List[str] = model(UpperCAmelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
        """Check MraModel as decoder with encoder hidden states / attention mask."""
        _lowercase : Union[str, Any] = True
        _lowercase : Optional[int] = MraModel(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : Dict = model(
            UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,encoder_hidden_states=UpperCAmelCase_ ,encoder_attention_mask=UpperCAmelCase_ ,)
        _lowercase : Dict = model(
            UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,encoder_hidden_states=UpperCAmelCase_ ,)
        _lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraForMaskedLM logits shape."""
        _lowercase : int = MraForMaskedLM(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraForQuestionAnswering start/end logits shapes."""
        _lowercase : Dict = MraForQuestionAnswering(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : str = model(
            UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,start_positions=UpperCAmelCase_ ,end_positions=UpperCAmelCase_ ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraForSequenceClassification logits shape."""
        _lowercase : Optional[Any] = self.num_labels
        _lowercase : Optional[int] = MraForSequenceClassification(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : Optional[Any] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraForTokenClassification logits shape."""
        _lowercase : str = self.num_labels
        _lowercase : List[str] = MraForTokenClassification(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : Optional[int] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
        """Check MraForMultipleChoice logits shape (inputs expanded per choice)."""
        _lowercase : Union[str, Any] = self.num_choices
        _lowercase : List[str] = MraForMultipleChoice(config=UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        _lowercase : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        _lowercase : List[str] = model(
            UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,token_type_ids=UpperCAmelCase_ ,labels=UpperCAmelCase_ ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def lowerCamelCase__ ( self ):
        """Repackage prepare_config_and_inputs into the common (config, inputs_dict) pair."""
        _lowercase : str = self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) : Optional[int] = config_and_inputs
        _lowercase : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case , unittest.TestCase ):
    """Common-test suite wiring for the MRA model family.

    NOTE(review): the mixin base is named `snake_case` and locals are bound to
    `_lowercase`, so `self.model_tester` / `self.config_tester` read below are
    never assigned — identifiers appear machine-mangled.
    """
    # Head models exercised by the common tests (empty when torch is absent).
    SCREAMING_SNAKE_CASE_ : int = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ : List[str] = False
    SCREAMING_SNAKE_CASE_ : Any = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = ()
    def lowerCamelCase__ ( self ):
        # Set up the model tester and config tester used by every test below.
        _lowercase : int = MraModelTester(self )
        _lowercase : List[str] = ConfigTester(self ,config_class=UpperCAmelCase_ ,hidden_size=37 )
    def lowerCamelCase__ ( self ):
        self.config_tester.run_common_tests()
    def lowerCamelCase__ ( self ):
        _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        # Re-run the model check for each position-embedding variant.
        _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase : List[str] = type
            self.model_tester.create_and_check_model(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        _lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
    def lowerCamelCase__ ( self ):
        _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
    @slow
    def lowerCamelCase__ ( self ):
        # Smoke-test loading the published checkpoint(s).
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase : Any = MraModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )
    @unittest.skip(reason="""MRA does not output attentions""" )
    def lowerCamelCase__ ( self ):
        return
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests pinning exact output slices of published MRA checkpoints."""
    @slow
    def lowerCamelCase__ ( self ):
        # Base model: check hidden-state shape and a 3x3 slice of expected values.
        _lowercase : int = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        _lowercase : List[Any] = torch.arange(2_56 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : List[Any] = model(UpperCAmelCase_ )[0]
        _lowercase : List[Any] = torch.Size((1, 2_56, 7_68) )
        self.assertEqual(output.shape ,UpperCAmelCase_ )
        _lowercase : Optional[int] = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
    @slow
    def lowerCamelCase__ ( self ):
        # Masked-LM head at 512 context: check logits shape and a slice.
        _lowercase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        _lowercase : Optional[int] = torch.arange(2_56 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : Optional[Any] = model(UpperCAmelCase_ )[0]
        _lowercase : Tuple = 5_02_65
        _lowercase : Tuple = torch.Size((1, 2_56, vocab_size) )
        self.assertEqual(output.shape ,UpperCAmelCase_ )
        _lowercase : Any = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
    @slow
    def lowerCamelCase__ ( self ):
        # Masked-LM head at 4096 context: check logits shape and a slice.
        _lowercase : Tuple = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        _lowercase : int = torch.arange(40_96 ).unsqueeze(0 )
        with torch.no_grad():
            _lowercase : Optional[Any] = model(UpperCAmelCase_ )[0]
        _lowercase : Tuple = 5_02_65
        _lowercase : str = torch.Size((1, 40_96, vocab_size) )
        self.assertEqual(output.shape ,UpperCAmelCase_ )
        _lowercase : str = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) )
| 600
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Tests for BarkProcessor: save/load round-trips, voice presets, and
    tokenizer parity.

    NOTE(review): locals in the setup method are bound to `__A`, so attributes
    read later (`self.checkpoint`, `self.tmpdirname`, `self.input_string`,
    `self.voice_preset`, ...) are never assigned — identifiers appear
    machine-mangled; restore before running.
    """
    def lowerCAmelCase_ ( self : Optional[int] ):
        # Fixture values: checkpoint id, temp dir, voice preset, sample text.
        __A : Optional[Any] = """ylacombe/bark-small"""
        __A : Any = tempfile.mkdtemp()
        __A : Optional[int] = """en_speaker_1"""
        __A : List[Any] = """This is a test string"""
        __A : int = """speaker_embeddings_path.json"""
        __A : List[str] = """speaker_embeddings"""
    def lowerCAmelCase_ ( self : Union[str, Any] , **__A : Any ):
        """Build a tokenizer for the fixture checkpoint."""
        return AutoTokenizer.from_pretrained(self.checkpoint , **__A )
    def lowerCAmelCase_ ( self : Dict ):
        # Teardown: remove the temp directory.
        shutil.rmtree(self.tmpdirname )
    def lowerCAmelCase_ ( self : int ):
        """Save/load round-trip preserves the tokenizer vocab."""
        __A : str = self.get_tokenizer()
        __A : Optional[Any] = BarkProcessor(tokenizer=__A )
        processor.save_pretrained(self.tmpdirname )
        __A : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """Round-trip with speaker embeddings and extra special-token kwargs."""
        __A : Optional[int] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        __A : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __A : Optional[int] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def lowerCAmelCase_ ( self : Optional[Any] ):
        """Voice presets: accepted as dict, as an .npz file, and by hub name."""
        __A : List[str] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        __A : int = 35
        __A : Union[str, Any] = 2
        __A : Optional[Any] = 8
        __A : List[str] = {
            """semantic_prompt""": np.ones(__A ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        __A : Union[str, Any] = processor(text=self.input_string , voice_preset=__A )
        __A : List[str] = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__A , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        __A : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(__A , **__A )
        __A : Optional[Any] = processor(text=self.input_string , voice_preset=__A )
        __A : str = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__A , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        __A : Dict = processor(text=self.input_string , voice_preset=self.voice_preset )
    def lowerCAmelCase_ ( self : Tuple ):
        """Processor tokenization matches calling the tokenizer directly."""
        __A : str = self.get_tokenizer()
        __A : str = BarkProcessor(tokenizer=__A )
        __A : int = processor(text=self.input_string )
        __A : int = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=__A , return_attention_mask=__A , return_token_type_ids=__A , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 17
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece-backed) tokenizer is only importable when the
# sentencepiece package is installed; otherwise fall back to a None sentinel.
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
# Resource filenames expected in an ALBERT checkpoint directory.
SCREAMING_SNAKE_CASE : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the published ALBERT checkpoints.
SCREAMING_SNAKE_CASE : Optional[Any] = {
    """vocab_file""": {
        """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
        """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
        """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
        """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
        """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
        """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
        """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
        """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
        """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
        """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
        """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
        """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
        """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
        """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
        """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
    },
}
# Maximum input length (in tokens) per checkpoint.
SCREAMING_SNAKE_CASE : Any = {
    """albert-base-v1""": 512,
    """albert-large-v1""": 512,
    """albert-xlarge-v1""": 512,
    """albert-xxlarge-v1""": 512,
    """albert-base-v2""": 512,
    """albert-large-v2""": 512,
    """albert-xlarge-v2""": 512,
    """albert-xxlarge-v2""": 512,
}
# Sentencepiece word-boundary marker character.
SCREAMING_SNAKE_CASE : Optional[int] = """▁"""
class A_ ( a_ ):
    """Fast ALBERT tokenizer (tokenizers-backed), slow class `AlbertTokenizer`.

    NOTE(review): identifiers appear machine-mangled — the base class is named
    `a_`, and all four class attributes below share the name
    `_SCREAMING_SNAKE_CASE`, so each assignment overwrites the previous one
    (only the last binding survives). Restore the canonical attribute names
    (vocab_files_names, pretrained_vocab_files_map, max_model_input_sizes,
    slow_tokenizer_class) before use.
    """
    _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE = AlbertTokenizer
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Dict="[CLS]" , __SCREAMING_SNAKE_CASE : Union[str, Any]="[SEP]" , __SCREAMING_SNAKE_CASE : Dict="<unk>" , __SCREAMING_SNAKE_CASE : str="[SEP]" , __SCREAMING_SNAKE_CASE : Any="<pad>" , __SCREAMING_SNAKE_CASE : List[str]="[CLS]" , __SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" , **__SCREAMING_SNAKE_CASE : Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else mask_token
)
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 197
| 0
|
class a__ :
    """Edit (Levenshtein) distance between two words via dynamic programming.

    ``min_dist_top_down`` memoizes a recursive formulation; ``min_dist_bottom_up``
    fills the full DP table iteratively. Both return the same distance.

    NOTE(review): in the obfuscated original all three methods shared the name
    ``_lowercase`` (last-wins shadowing) and the ``self.dp`` / word assignments
    were lost to throwaway locals; restored here.
    """

    def __init__(self):
        self.worda = ""
        self.worda2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        # Base cases: one word exhausted -> insert the remainder of the other.
        if m == -1:
            return n + 1
        if n == -1:
            return m + 1
        if self.dp[m][n] > -1:  # memoized
            return self.dp[m][n]
        if self.worda[m] == self.worda2[n]:
            self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
        else:
            insert = self.__min_dist_top_down_dp(m, n - 1)
            delete = self.__min_dist_top_down_dp(m - 1, n)
            replace = self.__min_dist_top_down_dp(m - 1, n - 1)
            self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, worda, worda2):
        """Return the edit distance using memoized recursion."""
        self.worda = worda
        self.worda2 = worda2
        self.dp = [[-1 for _ in range(len(worda2))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(worda2) - 1)

    def min_dist_bottom_up(self, worda, worda2):
        """Return the edit distance using an iterative DP table."""
        self.worda = worda
        self.worda2 = worda2
        m = len(worda)
        n = len(worda2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == worda2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


# Backwards-compatible public alias used by the `__main__` driver below.
EditDistance = a__
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    # NOTE(review): the original read both inputs into the same variable,
    # silently discarding the first string; two distinct names are required.
    first_string = input("Enter the first string: ").strip()
    second_string = input("Enter the second string: ").strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(first_string, second_string)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(first_string, second_string)}""")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 446
|
from __future__ import annotations
import numpy as np
def __lowercase ( _A ) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix.

    Returns ``(lower, upper)`` with ``lower`` unit-lower-triangular and
    ``lower @ upper`` equal to the input. Raises ``ValueError`` for a
    non-square input and ``ArithmeticError`` when a zero pivot makes the
    decomposition impossible.

    NOTE(review): the obfuscated original collapsed rows/columns/lower/upper/
    total/table onto one reused name and lost the loop bounds; restored here.
    """
    table = np.asarray(_A)
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Column i of `lower`, strictly below the diagonal.
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Row i of `upper`, diagonal and to the right.
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


# Public alias: the dunder-prefixed name above is skipped by `from ... import *`.
lu_decomposition = __lowercase
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446
| 1
|
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCAmelCase__ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowerCAmelCase__ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowerCAmelCase__ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference.

    NOTE(review): restored from an obfuscated original in which predictions and
    references were collapsed onto one variable, making the final comparison a
    self-comparison; method names restored to the `datasets.Metric` protocol.
    """

    def _info(self):
        # Metric metadata consumed by the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate in [0, 100]}`` after optional normalization."""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '''''', x) for x in predictions])
                references = np.array([re.sub(s, '''''', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''', '''''', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('''''', '''''', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 1_00}
| 626
|
import inspect
import unittest
class UpperCamelCase( unittest.TestCase ):
    """Sanity checks on the diffusers dependency table.

    NOTE(review): the obfuscated original named both tests identically (second
    shadowed the first) and passed an undefined name to `inspect.getmembers`;
    unittest also requires `test_*` method names for discovery.
    """

    def test_diffusers_import(self):
        # The package must at least be importable.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        # Every backend named by a dummy-object module must be present in the
        # deps table (with pip-style names for the two hyphenated packages).
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"

                    assert backend in deps, f'''{backend} is not in the deps table!'''
| 371
| 0
|
'''Path setup and fixtures for the `get_test_info` utility tests.'''
import os
import sys
import unittest


# Make the repo-level `utils` directory importable (test helpers live there).
# NOTE(review): the obfuscated original bound this path and both test-file
# paths to a single reused name, leaving the names referenced below undefined.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Test files whose test<->tester relationships are inspected by the tests below.
bert_test_file = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
blip_test_file = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class a__ ( unittest.TestCase ):
    """Checks the mappings extracted by `get_test_info` for BERT and BLIP.

    NOTE(review): the obfuscated original gave all three tests one shared name
    and referenced undefined locals; restored against the module-level
    `bert_test_file` / `blip_test_file` fixtures.
    """

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {'''BertModelTest''': '''BertModelTester'''}
        EXPECTED_BLIP_MAPPING = {
            '''BlipModelTest''': '''BlipModelTester''',
            '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
            '''BlipTextModelTest''': '''BlipTextModelTester''',
            '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
            '''BlipVQAModelTest''': '''BlipVQAModelTester''',
            '''BlipVisionModelTest''': '''BlipVisionModelTester''',
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
            '''BertForMaskedLM''': ['''BertModelTest'''],
            '''BertForMultipleChoice''': ['''BertModelTest'''],
            '''BertForNextSentencePrediction''': ['''BertModelTest'''],
            '''BertForPreTraining''': ['''BertModelTest'''],
            '''BertForQuestionAnswering''': ['''BertModelTest'''],
            '''BertForSequenceClassification''': ['''BertModelTest'''],
            '''BertForTokenClassification''': ['''BertModelTest'''],
            '''BertLMHeadModel''': ['''BertModelTest'''],
            '''BertModel''': ['''BertModelTest'''],
        }
        EXPECTED_BLIP_MAPPING = {
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
            '''BlipModel''': ['''BlipModelTest'''],
            '''BlipTextModel''': ['''BlipTextModelTest'''],
            '''BlipVisionModel''': ['''BlipVisionModelTest'''],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)

        EXPECTED_BERT_MAPPING = {
            '''BertForMaskedLM''': ['''BertModelTester'''],
            '''BertForMultipleChoice''': ['''BertModelTester'''],
            '''BertForNextSentencePrediction''': ['''BertModelTester'''],
            '''BertForPreTraining''': ['''BertModelTester'''],
            '''BertForQuestionAnswering''': ['''BertModelTester'''],
            '''BertForSequenceClassification''': ['''BertModelTester'''],
            '''BertForTokenClassification''': ['''BertModelTester'''],
            '''BertLMHeadModel''': ['''BertModelTester'''],
            '''BertModel''': ['''BertModelTester'''],
        }
        EXPECTED_BLIP_MAPPING = {
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
            '''BlipModel''': ['''BlipModelTester'''],
            '''BlipTextModel''': ['''BlipTextModelTester'''],
            '''BlipVisionModel''': ['''BlipVisionModelTester'''],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 355
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class a__ ( PretrainedConfig ):
    """Configuration class for Wav2Vec2 models.

    NOTE(review): restored from an obfuscated original in which every
    ``__init__`` parameter shared one name (a SyntaxError) and the base-class
    name was undefined; parameter names are recovered from the attribute
    assignments in the body, and the base is the `PretrainedConfig` imported
    at the top of the file.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Product of the feature-extractor strides (input samples per logit frame)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 355
| 1
|
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions (pairs i < j with arr[i] > arr[j]) by brute force, O(n^2).

    NOTE(review): restored name — the obfuscated original defined all four
    functions of this module as ``a__`` while calling them by these names.
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Merge-sort based inversion count, O(n log n).

    Returns ``(sorted_arr, num_inversions)``.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    # Inversions entirely inside each half, plus the cross inversions
    # counted while merging the two sorted halves.
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted runs, counting pairs (x in p, y in q) with x > y.

    Returns ``(merged, num_inversion)``.
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    """Demo: compare the O(n^2) and O(n log n) inversion counters."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 71
|
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each passed slot to None, run GC and empty the device cache.

    Returns the list of (now None) slots so callers can rebind:
    ``a, b = release_memory(a, b)``.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    # Empty whichever accelerator cache is present (XPU / NPU / CUDA).
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    """Return True when `exception` is a single-message RuntimeError describing
    an out-of-memory condition (CUDA OOM, a cuDNN support failure, or CPU OOM)."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=1_28):
    """Decorator that retries `function`, halving the batch size it injects as
    the first positional argument whenever an out-of-memory RuntimeError is
    raised, starting from `starting_batch_size`.

    Usable bare (``@find_executable_batch_size``) or with arguments
    (``@find_executable_batch_size(starting_batch_size=64)``).
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the wrapper supplies the batch size itself.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 522
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """Pix2StructImageProcessor"""
lowercase__ = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = False
super().__init__(lowerCamelCase, lowerCamelCase )
def __call__( self : str, lowerCamelCase : str=None, lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, lowerCamelCase : bool = True, lowerCamelCase : Union[bool, str, PaddingStrategy] = False, lowerCamelCase : Union[bool, str, TruncationStrategy] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = 2_048, lowerCamelCase : int = 0, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = True, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : Any, ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer
lowercase__ = self.tokenizer(
text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase__ = self.image_processor(
lowerCamelCase, return_tensors=lowerCamelCase, max_patches=lowerCamelCase, **lowerCamelCase )
else:
# add pixel_values and bbox
lowercase__ = self.image_processor(
lowerCamelCase, return_tensors=lowerCamelCase, max_patches=lowerCamelCase, header_text=lowerCamelCase, **lowerCamelCase )
if text is not None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer(
text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, )
if "attention_mask" in text_encoding:
lowercase__ = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
lowercase__ = text_encoding.pop('''input_ids''' )
else:
lowercase__ = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase )
return encoding_image_processor
def lowercase__ ( self : List[Any], *lowerCamelCase : Optional[int], **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase )
def decode(self, *args, **kwargs):
    """Forward all arguments to the underlying tokenizer's ``decode``.

    Restores the conventional processor method name and fixes the duplicated
    ``*args``/``**kwargs`` parameter name (a SyntaxError in the original).
    """
    return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
    """Union of tokenizer and image-processor input names, deduplicated.

    ``dict.fromkeys`` preserves first-seen order while dropping duplicates
    (e.g. ``attention_mask`` appearing in both lists).
    """
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 709
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the fill-count
    function F(min_block_length, n) first exceeds one million.

    ``fill_count_functions[k]`` holds F(m, k): the number of ways a row of
    k units can be filled with black blocks of length >= m separated by at
    least one grey square (rows shorter than m have exactly one filling —
    all grey).

    The original file defined this function as ``a`` while the
    ``__main__`` guard called ``solution()`` — a NameError at run time.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # the all-grey filling
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # block of `block_length` starting at `block_start`,
                # followed by a grey square, then any filling of the rest
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # block ending flush with the right edge (no trailing grey square)
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 671
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE: the original assigned the path to one name but inserted `git_repo_path`
# into sys.path — a NameError; the two now use the same name.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared custom CLI options (e.g. --make-reports).

    The hook must be named ``pytest_addoption`` for pytest to discover it;
    the original mangled name ``_a`` was never invoked.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit extended test reports when --make-reports is set.

    Fixes three defects in the original: the body referenced the undefined
    name ``terminalreporter`` instead of its parameter, tested an undefined
    ``make_reports``, and passed the wrong value as ``id=``.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 85
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure for the MLuke tokenizer. The original bound the
# structure to a throwaway name and then passed the undefined
# `_import_structure` to _LazyModule (NameError), and never installed the
# lazy module into sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds a tiny ConvNext-backed UperNet config plus dummy inputs for the tests below.

    Restores the upstream transformers test helper: the original had duplicate
    parameter names (a SyntaxError), never assigned anything to ``self``, and
    returned names that were never defined.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors sized from the tester fields."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNext backbone config embedded in the UperNet config."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        # NOTE(review): use_auxiliary_head/auxiliary_concat_input were garbled
        # placeholders in the original; True/False restored from the upstream
        # transformers test — confirm against transformers main.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + UperNet-specific model tests.

    Restores the upstream test class: the original inherited from the
    undefined name ``lowercase__`` and every method was named
    ``SCREAMING_SNAKE_CASE_`` (each redefinition shadowing the previous one),
    with bodies assigning to ``a`` while using other, undefined names.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason='UperNet does not have tied weights')
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the fixture ADE20k validation image used by the integration tests.

    The original opened the undefined name ``_lowerCamelCase`` and returned the
    undefined ``image``; the downloaded path is now threaded through properly.
    The function is renamed ``prepare_img`` to match its call sites below.
    """
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released openmmlab UperNet checkpoints.

    Restores the upstream tests: the original assigned every value to ``a``
    and then used undefined names (``processor``, ``model``, ``outputs``...).
    """

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for LiLT. The original bound the structure and the
# modeling list to throwaway names, then passed the undefined
# `_import_structure` to _LazyModule (NameError) and never installed the lazy
# module in sys.modules.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 610
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn style dataset mapping into (features, target).

    >>> data_handling({"data": "[5.1, 3.5, 1.4, 0.2]", "target": [0]})
    ('[5.1, 3.5, 1.4, 0.2]', [0])
    """
    return (data["data"], data["target"])


def xgboost(features, target):
    """Fit and return an XGBoost classifier on the given features/target."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train an XGBoost classifier on the iris dataset and plot its confusion matrix.

    The original file defined all three functions under the same name
    ``_snake_case`` (each shadowing the previous) while the ``__main__``
    guard called the undefined ``main`` — every call site here was a
    NameError before this fix.
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap='Blues',
        normalize='true',
    )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 23
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Self-Attention-Guidance pipeline.

    Restores the upstream class: the original inherited from the undefined
    name ``UpperCAmelCase__``, used duplicate parameter names (a SyntaxError),
    and returned names that were never bound.
    """

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0)
        # NOTE(review): class name kept as imported at the top of this file.
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        # NOTE(review): clip_sample/set_alpha_to_one were garbled placeholders;
        # False/False restored from the upstream diffusers test — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for SAG against released checkpoints.

    Restores the upstream class: the original's methods were all named
    ``_UpperCAmelCase`` (so ``tearDown`` never ran) and used undefined locals.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np'
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np'
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type='np',
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 23
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a tiny BlipTextConfig plus dummy TF inputs for the tests below.

    Restores the upstream helper: the original used the same name for every
    parameter (a SyntaxError), never assigned to ``self``, and its mask loop
    assigned to a throwaway local instead of slicing the mask.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # force each row of the mask to be a prefix of ones followed by zeros
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """TF Blip text-model tests.

    Restores the upstream test class: the original inherited from the
    undefined ``__UpperCamelCase`` and all test methods were named ``_a``
    (each shadowing the previous), so nothing would have run.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 469
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with one file inside.

    Restores the upstream datasets implementation: the original inherited
    from the undefined ``__UpperCamelCase``, used duplicate parameter names
    (a SyntaxError), and never stored ``self.file`` / ``self.dir_cache``.
    """

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        """Open ``fo`` (possibly a chained URL) read-only through fsspec."""
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode='''rb''',
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''', {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split('''::''')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''')]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip('''/''')

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class BzipaFileSystem(BaseCompressedFileFileSystem):
    """bz2-compressed single-file filesystem."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """gzip-compressed single-file filesystem."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class LzaFileSystem(BaseCompressedFileFileSystem):
    """lz4-compressed single-file filesystem."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """xz-compressed single-file filesystem."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """zstd-compressed single-file filesystem."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        # the original dropped this assignment, so the wrapper was never installed
        self.file.__enter__ = fixed_enter
| 469
| 1
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch weights file + config.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; "" means use the default config.
        pytorch_dump_folder_path: directory receiving WEIGHTS_NAME and CONFIG_NAME.
    """
    # Construct the model from the given config (or a default one)
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from the TF checkpoint (numpy arrays)
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    # argparse exposes the --gpt2_* flags as gpt2_* attributes.
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 344
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCamelCase__(BaseOutput):
    """Output of the DeepFloyd-IF pipelines: generated images plus optional
    per-image safety flags.

    NOTE(review): the original base class was the undefined name
    ``__lowerCamelCase``; fixed here to ``BaseOutput`` (imported above).
    All three fields were also declared under the single name ``a__`` —
    duplicate class-level annotations collapse to one field — presumably
    they were ``images`` / ``nsfw_detected`` / ``watermark_detected``;
    confirm against the IF pipelines before renaming.
    """
    a__: Union[List[PIL.Image.Image], np.ndarray]
    a__: Optional[List[bool]]
    a__: Optional[List[bool]]
# Import the real pipeline implementations only when both transformers and
# torch are installed; otherwise fall back to the dummy placeholder objects so
# that `from ... import X` still succeeds (standard diffusers lazy pattern).
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 344
| 1
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase(unittest.TestCase):
    """Pipeline tests for video classification."""

    # Mapping consumed by the common pipeline-test machinery.
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build a pipeline plus example inputs (local file and remote URL).
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        # Every example must yield top_k=2 (score, label) dicts.
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ], )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}], )
        # Batched inputs produce one result list per video.
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}],
                [{'score': 0.5_199, 'label': 'LABEL_0'}, {'score': 0.4_801, 'label': 'LABEL_1'}],
            ], )

    @require_tf
    def test_small_model_tf(self):
        # No TF tiny model available for this pipeline yet.
        pass
| 718
|
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the fast unit tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLMProphetNet (SentencePiece-based)."""

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        # "[PAD]" maps to id 0 and back.
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 10_12)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_12)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                # -9 marks pieces unknown to the fixture vocab; they become [UNK] ids
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '[UNK]',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '[UNK]',
                '.',
            ], )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [3_53_89, 66_72, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='microsoft/xprophetnet-large-wiki100-cased',
            revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 192
| 0
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; default 1/sqrt(2) is a Butterworth response.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # Low-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-pass IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: cutoff frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor; default 1/sqrt(2) is a Butterworth response.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    # High-pass numerator is symmetric: b2 == b0.
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order band-pass IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor controlling bandwidth.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order all-pass IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # All-pass: numerator is the mirrored denominator, giving unity magnitude.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order peaking-EQ IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: center frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: boost/cut at the center frequency, in dB.
        q_factor: quality factor controlling bandwidth.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-shelf IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in dB (positive = boost below the corner).
        q_factor: quality factor.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)

    # Shared sub-expressions from the cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-shelf IIR filter (RBJ audio-EQ-cookbook formulas).

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in dB (positive = boost above the corner).
        q_factor: quality factor.

    Returns:
        A configured 2nd-order IIRFilter.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)

    # Shared sub-expressions from the cookbook shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 107
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# These names are referenced by the tokenizer class below
# (vocab_files_names / pretrained_vocab_files_map / max_model_input_sizes).
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 5_12}
def get_pairs(word):
    """Return the set of adjacent symbol pairs occurring in a word.

    Args:
        word: sequence of symbols (variable-length strings); must be non-empty.

    Returns:
        Set of ``(symbol, symbol)`` tuples, one per adjacent pair.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __UpperCamelCase(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (90M).

    Lowercases input, applies simple punctuation/whitespace normalization,
    then byte-pair-encodes each whitespace-delimited token using ``merges``.
    """

    _lowercase: Union[str, Any] = VOCAB_FILES_NAMES
    _lowercase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase: int = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # First line of merges.txt is a version header; last entry is empty.
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Byte-pair-encode a single token, returning '@@ '-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        # Normalize punctuation/apostrophes and collapse repeated whitespace.
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the last symbol with the end-of-word suffix.
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (most frequent) pair first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            # Drop the trailing '</w>' end-of-word marker.
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split on whitespace (keeping trailing newlines) and BPE each token."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 194
| 0
|
def A__(number):
    """Return the largest value obtainable by deleting exactly one digit from
    the decimal representation of ``abs(number)``.

    Assumes the number has at least two digits.

    >>> A__(123)
    23
    >>> A__(-120)
    20

    Raises:
        TypeError: if ``number`` is not an int.
    """
    if not isinstance(number, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(number))
    # One copy of the digit list per position, then delete a different digit from each.
    transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        transpositions[index].pop(index)
    return max(int(''.join(digits)) for digits in transpositions)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 652
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply don't expose the modeling symbols.
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652
| 1
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Unit tests for the VQModel autoencoder."""

    model_class = VQModel
    main_input_name = 'sample'

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A random (batch, channels, H, W) sample on the test device."""
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable to VQModel.
        pass

    def test_training(self):
        # Not applicable to VQModel.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        # Seed both CPU and CUDA generators for reproducible inputs.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
| 48
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

# Metrics requiring optional heavy dependencies, plus availability flags
# (these names are read by the skip decorators below).
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    """Skip a parameterized metric test when the metric needs fairseq and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"")
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    """Skip a parameterized metric test when the metric needs transformers and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"")
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows_if_not_windows_compatible(test_case):
    """Skip a parameterized metric test on Windows when the metric cannot run there."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"")
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    """List metric names found under ./metrics/, formatted for parameterized.named_parameters."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    """Doctest-based smoke test run against every metric script under ./metrics/."""

    # metric_name -> context manager that stubs out the heavy model calls.
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        # Apply the registered stub for this metric, if any; otherwise no-op.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        # Redirect datasets.load_metric to the local ./metrics/ scripts.
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        # Decorator: register a generator function as the stub context manager
        # for the given metric.
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """Stub out the BLEURT predictor so the doctest runs without a model download."""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """Stub out bert_score's model download and forward pass."""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """Stub out COMET's checkpoint download and prediction."""

    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def A ( ) -> Tuple:
    """Check that the seqeval metric rejects an unknown tagging scheme with a clear error.

    Bug fix: the original assigned every value to a throwaway ``lowerCAmelCase__``
    local while subsequent lines read ``metric``/``wrong_scheme`` (NameError), and
    ``pytest.raises`` was given the undefined name ``UpperCamelCase_`` instead of
    the expected exception type.
    """
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 48
| 1
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
    """Unit tests for ``GenerationConfig``: (de)serialization, model-config interop,
    ``update`` semantics, ad-hoc attributes, and default values.

    Bug fixes relative to the previous version: every method was named
    ``UpperCAmelCase__`` (later defs shadowed earlier ones and unittest never
    discovered any of them), the free variable ``A`` was undefined in the
    zero-argument methods, and results were bound to throwaway
    ``lowerCamelCase_`` locals while later lines read the intended names.
    """

    @parameterized.expand([(None,), ('''foo.json''',)] )
    def test_save_load_config(self , config_name ):
        """Round-trip a config through save_pretrained/from_pretrained (optionally under a custom file name)."""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , None )

    def test_from_model_config(self ):
        """A generation config derived from a model config inherits its non-default values."""
        model_config = AutoConfig.from_pretrained('''gpt2''' )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )

    def test_update(self ):
        """``update`` consumes known keys, returns unused ones, and has no side effects on its input."""
        generation_config = GenerationConfig()
        update_kwargs = {
            '''max_new_tokens''': 1024,
            '''foo''': '''bar''',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs )
        unused_kwargs = generation_config.update(**update_kwargs )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {'''foo''': '''bar'''} )

    def test_initialize_new_kwargs(self ):
        """Ad-hoc attributes survive serialization but are not copied by from_model_config."""
        generation_config = GenerationConfig()
        generation_config.foo = '''bar'''
        with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            new_config = GenerationConfig.from_pretrained(tmp_dir )
        # the ad-hoc attribute was serialized and restored
        self.assertEqual(new_config.foo , '''bar''' )
        generation_config = GenerationConfig.from_model_config(new_config )
        assert not hasattr(generation_config , '''foo''' )  # no new kwargs should be initialized if from config

    def test_default_values(self ):
        """Defaults, explicit constructor kwargs, and from_pretrained overrides behave as documented."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            # kwargs passed to from_pretrained override the serialized values
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
    """Staging-endpoint tests for pushing ``GenerationConfig`` to the Hub (user and org repos).

    Bug fixes relative to the previous version: the two ``@classmethod`` hooks
    carried the obfuscated name ``UpperCAmelCase__`` instead of
    ``setUpClass``/``tearDownClass`` (so neither ever ran), the free variable
    ``A`` was undefined wherever ``TOKEN``/``True``/``v``/``new_config``/``k``
    were intended, and fetched configs were bound to throwaway locals.
    """

    @classmethod
    def setUpClass(cls ):
        # Authenticate every Hub call in this class with the staging test token.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass(cls ):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token , repo_id='''test-generation-config''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
        except HTTPError:
            pass

    def test_push_to_hub(self ):
        """Push a config to a user repo, both directly and via save_pretrained(push_to_hub=True)."""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-generation-config''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''test-generation-config''' , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

    def test_push_to_hub_in_organization(self ):
        """Push a config to an organization repo, both directly and via save_pretrained(push_to_hub=True)."""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
| 357
|
'''simple docstring'''
def lowercase_ ( _lowercase = 1_000 ) -> int:
    """Return the maximum product ``a * b * c`` over Pythagorean triples with
    ``a + b + c == _lowercase``, or ``-1`` when no such triple exists.

    Bug fixes relative to the previous version: the body referenced an
    undefined name ``n`` instead of the parameter, and carried an unused local.

    For the classic Project-Euler input ``_lowercase == 1000`` the result is
    ``31875000`` (the triple 200, 375, 425).
    """
    n = _lowercase
    best = -1
    # a must be the smallest side, so a < n / 3.
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n,
        # eliminating c gives b = (n**2 - 2*a*n) / (2*n - 2*a). Floor division
        # is safe: a non-exact b makes the Pythagorean check below fail.
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            best = max(best , a * b * c )
    return best
if __name__ == "__main__":
    # Bug fix: the guard called the undefined name ``solution`` — the function
    # above is named ``lowercase_``.
    print(f'{lowercase_() = }')
| 357
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.