"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = MvpTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = MvpTokenizerFast
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Any = filter_roberta_detectors
def a_ ( self ) -> Dict:
super().setUp()
UpperCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase = {'unk_token': '<unk>'}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def a_ ( self , **lowercase_ ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def a_ ( self , **lowercase_ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def a_ ( self , lowercase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def a_ ( self ) -> str:
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def a_ ( self ) -> Optional[int]:
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def a_ ( self ) -> Any:
UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors='pt' )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
# Test that special tokens are reset
@require_torch
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , padding=lowercase_ , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , lowercase_ )
self.assertIn('attention_mask' , lowercase_ )
self.assertNotIn('labels' , lowercase_ )
self.assertNotIn('decoder_attention_mask' , lowercase_ )
@require_torch
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowercase_ , max_length=3_2 , padding='max_length' , return_tensors='pt' )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
@require_torch
def a_ ( self ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def a_ ( self ) -> Tuple:
UpperCAmelCase = ['A long paragraph for summarization.']
UpperCAmelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowercase_ , text_target=lowercase_ , return_tensors='pt' )
UpperCAmelCase = inputs['input_ids']
UpperCAmelCase = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a_ ( self ) -> Optional[int]:
pass
def a_ ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
UpperCAmelCase = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = "falcon"
__SCREAMING_SNAKE_CASE : List[Any] = ["past_key_values"]
def __init__( self , lowercase_=6_5_0_2_4 , lowercase_=4_5_4_4 , lowercase_=3_2 , lowercase_=7_1 , lowercase_=1E-5 , lowercase_=0.0_2 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=1_1 , lowercase_=1_1 , **lowercase_ , ) -> Any:
UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase = kwargs.pop('n_embed' , lowercase_ )
UpperCAmelCase = hidden_size if n_embed is None else n_embed
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = use_cache
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase = alibi
UpperCAmelCase = new_decoder_architecture
UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase = parallel_attn
UpperCAmelCase = bias
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
@property
def a_ ( self ) -> List[Any]:
return self.hidden_size // self.num_attention_heads
@property
def a_ ( self ) -> List[str]:
return not self.alibi
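# A quick sanity-check sketch (not part of the original module): the defaults
# above describe the falcon-7b geometry, and the two properties are derived
# from it. The values below simply restate those defaults.
#
#   config = FalconConfig()
#   config.head_dim  # 4544 // 71 == 64
#   config.rotary    # True: rotary embeddings are used whenever alibi is False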
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def prepare_config_and_inputs_for_decoder(self):
        (
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config, input_ids, token_type_ids, input_mask, sequence_labels,
            token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels,
            token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 1.
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 0.
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Toggle the bit at `position`.
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # Report whether the bit at `position` is 1.
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return the bit at `position` as 0 or 1.
    return int((number & (1 << position)) != 0)
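# Worked examples for the helpers above (easy to verify by hand; positions
# are 0-indexed from the least significant bit):
#
#   set_bit(0b1101, 1)     -> 0b1111 (15)
#   clear_bit(0b10010, 1)  -> 0b10000 (16)
#   flip_bit(0b101, 1)     -> 0b111 (7)
#   is_bit_set(0b1010, 1)  -> True
#   get_bit(0b1010, 0)     -> 0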
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A__ :
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( __snake_case ) -> str:
__lowerCAmelCase : Union[str, Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _lowercase ( __snake_case ) -> Dict:
__lowerCAmelCase : Tuple = np.array(__snake_case )
__lowerCAmelCase : Any = npimg.shape
return {"hash": hashimage(__snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = MaskGenerationPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> int:
"""simple docstring"""
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF")
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
pass
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = pipeline("mask-generation" , model="facebook/sam-vit-huge")
__lowerCAmelCase : int = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256)
# Shortening by hashing
__lowerCAmelCase : Optional[Any] = []
for i, o in enumerate(outputs["masks"]):
new_outupt += [{"mask": mask_to_test_readable(_SCREAMING_SNAKE_CASE), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> str:
"""simple docstring"""
__lowerCAmelCase : List[Any] = "facebook/sam-vit-huge"
__lowerCAmelCase : str = pipeline("mask-generation" , model=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
__lowerCAmelCase : Tuple = []
for i, o in enumerate(outputs["masks"]):
new_outupt += [{"mask": mask_to_test_readable(_SCREAMING_SNAKE_CASE), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
] , )
"""simple docstring"""
def _lowercase ( __snake_case = 3 ,__snake_case = 7 ,__snake_case = 1_000_000 ) -> int:
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : List[str] = 1
for current_denominator in range(1 ,limit + 1 ):
__lowerCAmelCase : Optional[int] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__lowerCAmelCase : Union[str, Any] = current_numerator
__lowerCAmelCase : int = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
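# Why this works: for a fixed denominator d, the largest numerator n with
# n/d < 3/7 is floor(3*d/7), minus one when d is a multiple of 7 (an exact
# match is not "to the left"). Candidates are compared by cross-multiplication
# (n1*d2 > d1*n2), which avoids floating-point error. Small hand check:
# solution(3, 7, 8) returns 2, since 2/5 is the closest fraction below 3/7
# with denominator <= 8.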
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> YolosConfig:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
SCREAMING_SNAKE_CASE__ :List[Any] = 1_9_2
SCREAMING_SNAKE_CASE__ :Tuple = 7_6_8
SCREAMING_SNAKE_CASE__ :int = 1_2
SCREAMING_SNAKE_CASE__ :int = 3
SCREAMING_SNAKE_CASE__ :str = [8_0_0, 1_3_3_3]
SCREAMING_SNAKE_CASE__ :Dict = False
elif yolos_name == "yolos_s_dWr":
SCREAMING_SNAKE_CASE__ :Optional[Any] = 3_3_0
SCREAMING_SNAKE_CASE__ :Any = 1_4
SCREAMING_SNAKE_CASE__ :Optional[int] = 6
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 1_3_2_0
elif "yolos_s" in yolos_name:
SCREAMING_SNAKE_CASE__ :int = 3_8_4
SCREAMING_SNAKE_CASE__ :Tuple = 1_5_3_6
SCREAMING_SNAKE_CASE__ :int = 1_2
SCREAMING_SNAKE_CASE__ :Optional[int] = 6
elif "yolos_b" in yolos_name:
SCREAMING_SNAKE_CASE__ :Optional[int] = [8_0_0, 1_3_4_4]
SCREAMING_SNAKE_CASE__ :Any = 9_1
SCREAMING_SNAKE_CASE__ :Tuple = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ :Tuple = 'coco-detection-id2label.json'
SCREAMING_SNAKE_CASE__ :int = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ :List[str] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Any = idalabel
SCREAMING_SNAKE_CASE__ :Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosConfig , UpperCAmelCase__ : bool = False ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ :List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ :Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ :int = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE__ :Tuple = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ :Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ :Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ :Tuple = in_proj_weight[-config.hidden_size :, :]
SCREAMING_SNAKE_CASE__ :Optional[int] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
'''simple docstring'''
if "backbone" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('backbone' , 'vit' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
SCREAMING_SNAKE_CASE__ :int = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ :Any = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE__ :Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ :Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ :List[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ :Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ :int = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
SCREAMING_SNAKE_CASE__ :List[Any] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
SCREAMING_SNAKE_CASE__ :str = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = name.replace('vit.norm' , 'vit.layernorm' )
return name
def lowerCamelCase ( UpperCAmelCase__ : dict , UpperCAmelCase__ : YolosForObjectDetection ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ :List[Any] = orig_state_dict.pop(_snake_case )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ :Any = key.split('.' )
SCREAMING_SNAKE_CASE__ :Optional[int] = int(key_split[2] )
SCREAMING_SNAKE_CASE__ :str = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ :List[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE__ :Tuple = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ :Tuple = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ :Any = val[:dim]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ :List[str] = val[-dim:]
else:
SCREAMING_SNAKE_CASE__ :Tuple = val
return orig_state_dict
def lowerCamelCase ( ) -> torch.Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ :Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy the original YOLOS weights into the HF model and verify the outputs."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == 'yolos_ti':
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == 'yolos_s_200_pre':
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == 'yolos_s_300_pre':
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == 'yolos_s_dWr':
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == 'yolos_base':
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}')

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }

        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
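
# Example invocation (illustrative only; the script file name and the paths
# below are hypothetical, not part of the original source):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small
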
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """Configuration class for a Speech2Text model."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6000, max_target_positions=1024,
        num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024,
        input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
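
# A minimal usage sketch (illustrative; not part of the original module):
#
#   config = Speech2TextConfig()              # all defaults shown above
#   config.num_attention_heads                # -> 4, via the attribute_map alias
#   Speech2TextConfig(num_conv_layers=3)      # raises ValueError: kernel sizes mismatch
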
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """Read an id2label text file: each non-empty line's first token, keyed by line number."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy the fairseq wav2vec2 weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_A : Tuple = parser.parse_args()
_A : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
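
# Example invocation (illustrative only; the script name and checkpoint path
# are hypothetical):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned
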
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Collect all unique reduced sums found for n in {1, 2, -1, -2} and return
    numerator + denominator of their total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample

        # the difference cancels out, so this always returns a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
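
# Hypothetical usage sketch (`unet` and `scheduler` stand in for any compatible
# UNet2DModel and scheduler instance; the names are illustrative):
#
#   pipeline = CustomPipeline(unet=unet, scheduler=scheduler)
#   output = pipeline()  # tensor of ones, shaped like a single sample
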
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform a few sanity checks on the arguments."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
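
# Example invocation (illustrative values, loosely following the research
# project's README; every path and name below is hypothetical):
#
#   python train.py --force --dump_path serialization_dir/my_first_training \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0
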
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with one `<symbol> <count>` pair per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
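
# Example invocation (illustrative only; the dump directory is hypothetical
# and must contain checkpoint.pt, dict.txt and bpecodes as checked above):
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump --pytorch_dump_folder_path ./biogpt
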
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_instructblip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy the original InstructBLIP weights into the HF design and verify the outputs."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
lowerCAmelCase__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
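
# Example invocation (illustrative only; requires the salesforce-lavis package
# and enough GPU memory to hold both models at once):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl --pytorch_dump_folder_path ./instructblip-flan-t5-xl
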
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Raise base to a non-negative exponent using recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
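    # Quick sanity checks (illustrative values, not from the source):
    assert power(2, 3) == 8
    assert power(5, 0) == 1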
| 265
| 0
|
def circle_sort(collection: list) -> list:
    """Sort a list in place using the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
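    # Quick sanity check (illustrative input, not from the source):
    assert circle_sort([4, 1, 3, 2]) == [1, 2, 3, 4]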
| 702
|
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: bytes ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE_ )
A__ = "".join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data )
A__ = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
A__ = b"=" * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6)
else:
A__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 6 ) ).encode()
+ padding
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = (
"argument should be a bytes-like object or ASCII string, "
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
A__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
A__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A__ = encoded_data[:-padding]
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )
A__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
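    # Round-trip sanity check (illustrative input, not from the source):
    assert base64_encode(b"hello") == b"aGVsbG8="
    assert base64_decode("aGVsbG8=") == b"hello"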
| 626
| 0
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
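    # Quick sanity checks (illustrative values, not from the source):
    assert sigmoid(np.array([0.0]))[0] == 0.5
    assert sigmoid_linear_unit(np.array([0.0]))[0] == 0.0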
| 73
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 431
| 0
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that generates a text caption for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True)
            }
            records.append(record)
        return records
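# Hedged usage sketch (checkpoint name is illustrative, not from this file):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="microsoft/git-base")
# print(captioner("path/to/image.png"))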
| 58
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
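# Hedged usage sketch (flags follow the parser defined above; file names are illustrative):
#   transformers-cli train --train_data ./train.csv --task text_classification --output ./out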
| 58
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2_000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
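# Hedged usage sketch (checkpoint name is illustrative, not from this file):
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]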
| 668
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text: str):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token: str):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
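# Hedged usage sketch (checkpoint name comes from the URL map above):
# tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
# tokenizer("ticket")  # tokenizes character by character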
| 668
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
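# Minimal usage sketch (attribute values follow the defaults defined above):
# config = LxmertConfig()
# config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}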
| 133
|
"""simple docstring"""
def decimal_to_fraction(decimal):
    """Return the given decimal (number or numeric string) as a reduced (numerator, denominator) tuple."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 133
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex to destination_vertex and return the graph."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
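if __name__ == "__main__":
    # Small usage example (values are illustrative, not from the source):
    graph: GraphAdjacencyList[int] = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # -> {1: [2], 2: [1, 3], 3: [2]}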
| 397
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 297
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the input into a batch: a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
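# Hedged usage sketch (checkpoint name is illustrative, not from this file):
# processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")
# inputs = processor(video_frames, return_tensors="pt")  # video_frames: list of PIL images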
| 687
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687
| 1
|
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number)
            iterations += 1
            if is_palindrome(a_number):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
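    # Worked example (illustrative): 47 + 74 = 121, a palindrome after one step,
    # so 47 is not a Lychrel candidate.
    assert sum_reverse(47) == 121
    assert is_palindrome(121)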
| 454
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    print(calc_profit(profit, weight, max_weight))
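    # Worked example (illustrative values, not from the source): for profits
    # [60, 100, 120] with weights [10, 20, 30] and max_weight 50, the greedy
    # fractional strategy takes items 1 and 2 whole plus 20/30 of item 3:
    # 60 + 100 + (20 / 30) * 120 = 240.
    assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0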
| 454
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
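    # Launch sketch (assumed command shape, not from this file): start one process per
    # rank with torchrun, which sets the RANK / WORLD_SIZE env vars read by main(), e.g.
    #   torchrun --nproc_per_node=2 test_distributed_dataset.py --num_workers 2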
| 700
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 486
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
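# Minimal usage sketch (values are illustrative):
# config = DPRConfig(projection_dim=128)
# assert config.model_type == "dpr"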
| 48
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
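# Hedged usage sketch (checkpoint name taken from the URL map above):
# tokenizer = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")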
| 555
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''patch_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
a__ = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
a__ = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
a__ = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
a__ = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
a__ = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a__ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
a__ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
a__ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
a__ = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a__ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
a__ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
a__ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
a__ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
a__ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
a__ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
a__ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
a__ = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
a__ = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
a__ = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
a__ = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
a__ = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
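# Worked example (illustrative, not in the original script): the key
# "pretrained.model.blocks.0.attn.proj.weight" passes through the chain above as
#   -> "dpt.encoder.blocks.0.attn.proj.weight"
#   -> "dpt.encoder.blocks.0.attention.output.dense.weight"
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"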
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
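# Minimal sanity check (illustrative, assuming hidden_size=1024 as in the "large" config):
# the fused qkv weight has shape (3 * hidden_size, hidden_size), and the slices above cut
# it row-wise into equal query/key/value blocks:
#
#   w = torch.zeros(3 * 1024, 1024)
#   q, k, v = w[:1024], w[1024:2048], w[-1024:]
#   assert q.shape == k.shape == v.shape == (1024, 1024)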
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model to hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
    """simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
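        # Worked example (illustrative, not in the original test): with seq_length=7 and
        # attention_window=4, encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8, i.e. the
        # sequence is padded up to the next multiple of the attention window.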
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['''global_attention_mask'''] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
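    # (Illustrative summary, not in the original test: the check above verifies that a
    # forward pass over just the 3 new tokens plus the cached `past_key_values`
    # reproduces, on a random slice, the logits of a full pass over the whole sequence.)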
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
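# Note (illustrative observation, not in the original file): when no masks are passed,
# `attention_mask` simply marks the non-pad positions of `input_ids`, while
# `decoder_attention_mask` always keeps the first decoder position (the decoder start
# token) visible even if it happens to equal the pad token id.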
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['''global_attention_mask'''] = tf.zeros_like(inputs_dict['''attention_mask'''] )
        num_global_attn_indices = 2
        inputs_dict['''global_attention_mask'''] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def test_saved_model_creation( self ):
        pass
    def test_generate_with_headmasking( self ):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    def test_inference_no_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 )
    def test_inference_with_lm_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 , rtol=1e-3 )
'''simple docstring'''
def manhattan_distance( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list[float] ) -> None:
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        'Expected a list of numbers as input, found '
                        F'{type(item ).__name__}'
                    )
                    raise TypeError(msg )
        else:
            msg = F'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg )
    else:
        raise ValueError('Missing an input' )
def manhattan_distance_one_liner( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
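# Quick check (illustrative): both implementations agree, e.g.
#   manhattan_distance([1, 1], [2, 2]) == manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0
# since |1 - 2| + |1 - 2| = 2.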
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import re
def is_sri_lankan_phone_number( phone : str ) -> bool:
    """simple docstring"""
    pattern = re.compile(
        R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
    return bool(re.search(pattern , phone ) )
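# Formats accepted by the pattern above (illustrative): an initial 0, 94, +94 or 0094,
# then 7 followed by one of 0/1/2/4/5/6/7/8, an optional single "-" or space, and
# exactly seven more digits -- e.g. "0712345678", "+9470 1234567", or the
# "0094702343221" exercised below.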
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ):
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ] )
    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ):
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
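# Illustrative usage sketch (not part of the original file; `image_processor` is a
# hypothetical preprocessor instance):
#
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   dummy = onnx_config.generate_dummy_inputs(image_processor, batch_size=2, framework="pt")
#   # -> a dict of tensors built from two randomly generated 32x32 RGB images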
def jaccard_similarity( set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
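    # Expected output (illustrative): the sets share {"c", "d", "e"} and their union has
    # 8 elements, so the script prints 3 / 8 = 0.375.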
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ) ->FocalNetConfig:
    """simple docstring"""
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key( name ) ->str:
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ) ->int:
    """simple docstring"""
    model_name_to_url = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push( hidden_sizes : int , name : str , config : LevitConfig , save_directory : Path , push_to_hub : bool = True ) ->List[Any]:
    """simple docstring"""
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        original_logits = from_model(x )
        our_logits = our_model(x ).logits
        assert torch.allclose(original_logits , our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ) ->int:
    """simple docstring"""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    names_to_config = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : str = tmp_path / """cache"""
a_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ : Optional[Any] = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : Optional[Any] = tmp_path / """cache"""
a_ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : Union[str, Any] = features.copy() if features else default_expected_features
a_ : Any = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : Optional[int] = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : int = tmp_path / """cache"""
a_ : List[str] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
a_ : Tuple = features.copy() if features else default_expected_features
a_ : Optional[Any] = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : int = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _a ( __UpperCamelCase , __UpperCamelCase ):
a_ : Any = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
a_ : Optional[Any] = features.copy()
a_ : Tuple = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : int = tmp_path / """cache"""
a_ : List[Any] = JsonDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : List[str] = tmp_path / """cache"""
a_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : str = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if issubclass(__UpperCamelCase , __UpperCamelCase ):
a_ : List[str] = jsonl_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
a_ : int = [jsonl_path]
a_ : str = tmp_path / """cache"""
a_ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : Tuple = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_dataset(__UpperCamelCase , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
a_ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : str = tmp_path / """cache"""
a_ : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a_ : Tuple = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : Any = tmp_path / """cache"""
a_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : Optional[Any] = features.copy() if features else default_expected_features
a_ : Dict = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a_ : Tuple = JsonDatasetReader({"""train""": jsonl_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if split:
a_ : int = {split: jsonl_path}
else:
a_ : Dict = """train"""
a_ : Optional[int] = {"""train""": jsonl_path, """test""": jsonl_path}
a_ : List[str] = tmp_path / """cache"""
a_ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
a_ : Union[str, Any] = JsonDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_json_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
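# (Illustrative note: `load_json_lines` parses one JSON object per line -- the JSON Lines
# format produced when the writer is called with `lines=True` -- while `load_json` reads
# the whole buffer as a single JSON document, matching the `lines=False` writer path.)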
class a__ :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase( self : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ ).write()
buffer.seek(0 )
a_ : List[Any] = load_json_function(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(exported_content[0] , lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , orient=lowerCamelCase_ ).write()
buffer.seek(0 )
a_ : Dict = load_json(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase_ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
a_ : Optional[int] = load_json_function(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(exported_content[0] , lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , lines=lowerCamelCase_ , orient=lowerCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
a_ : Dict = load_json(lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase_ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(lowerCamelCase_ ) == 1_0
def UpperCAmelCase( self : Dict , lowerCamelCase_ : int ):
with pytest.raises(lowerCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ):
a_ : List[str] = tmp_path_factory.mktemp("""data""" ) / F'''test.json.{extension}'''
a_ : List[Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowerCamelCase_ , lowerCamelCase_ , compression=lowerCamelCase_ ).write()
with fsspec.open(lowerCamelCase_ , """rb""" , compression="""infer""" ) as f:
a_ : List[str] = f.read()
with fsspec.open(lowerCamelCase_ , """rb""" , compression="""infer""" ) as f:
a_ : Any = f.read()
assert exported_content == original_content
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
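# e.g. (illustrative): `batch_sizes: List[int] = list_field(default=[8])` gives every
# dataclass instance its own fresh list via `default_factory`, instead of sharing one
# mutable default list across instances.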
@dataclass
class BenchmarkArguments :
    models: List[str] = list_field(
        default=[] , metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
    inference: bool = field(
        default=True , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
    cuda: bool = field(
        default=True , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
    tpu: bool = field(
        default=True , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
    fp16: bool = field(default=False , metadata={"""help""": """Use FP16 to accelerate inference."""} )
    training: bool = field(default=False , metadata={"""help""": """Benchmark training of model"""} )
    verbose: bool = field(default=False , metadata={"""help""": """Verbose memory tracing"""} )
    speed: bool = field(
        default=True , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
    memory: bool = field(
        default=True , metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={"""help""": """Trace memory line by line"""} )
    save_to_csv: bool = field(default=False , metadata={"""help""": """Save result to a CSV file"""} )
    log_print: bool = field(default=False , metadata={"""help""": """Save all print statements in a log file"""} )
    env_print: bool = field(default=False , metadata={"""help""": """Whether to print environment information"""} )
    multi_process: bool = field(
        default=True , metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } , )
    inference_time_csv_file: str = field(
        default=F"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
    inference_memory_csv_file: str = field(
        default=F"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
    train_time_csv_file: str = field(
        default=F"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
    train_memory_csv_file: str = field(
        default=F"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
    env_info_csv_file: str = field(
        default=F"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
    log_filename: str = field(
        default=F"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
    repeat: int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } , )
    def __post_init__( self ):
        warnings.warn(
            F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , FutureWarning , )
    def to_json_string( self ):
        return json.dumps(dataclasses.asdict(self ) , indent=2 )
    @property
    def model_names( self ):
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models
    @property
    def do_multi_processing( self ):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
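    # (Illustrative note: importing this package therefore only builds a thin _LazyModule;
    # the heavy torch/TF submodules are imported on first attribute access, while the
    # TYPE_CHECKING branch above gives static type checkers the real symbols.)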
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
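Outside of the test harness, the same hooks can be used directly. A minimal usage sketch based only on the calls exercised in the tests above (CPU offload with on-demand execution):

import torch
import torch.nn as nn
from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

net = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 5))
execution_device = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(net, execution_device=execution_device, offload=True)
print(net[0].weight.device)  # meta: weights are offloaded until a forward pass needs them
out = net(torch.randn(2, 3))  # weights are streamed to the execution device per forward
remove_hook_from_submodules(net)  # restores the original weights on CPU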
| 26
| 1
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = DownBlockaD # noqa F405
lowerCamelCase_ : int = """down"""
def __UpperCAmelCase( self ):
__A : Any = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[str] = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase_ : List[str] = """down"""
def __UpperCAmelCase( self ):
__A : Any = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = AttnDownBlockaD # noqa F405
lowerCamelCase_ : Dict = """down"""
def __UpperCAmelCase( self ):
__A : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : int = CrossAttnDownBlockaD # noqa F405
lowerCamelCase_ : Any = """down"""
def __UpperCAmelCase( self ):
__A : Tuple = super().prepare_init_args_and_inputs_for_common()
__A : Dict = 32
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Any = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase_ : Any = """down"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Dict = super().prepare_init_args_and_inputs_for_common()
__A : List[str] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __UpperCAmelCase( self ):
__A : Tuple = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = SkipDownBlockaD # noqa F405
lowerCamelCase_ : Tuple = """down"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[str] = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Any = AttnSkipDownBlockaD # noqa F405
lowerCamelCase_ : List[str] = """down"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Any = DownEncoderBlockaD # noqa F405
lowerCamelCase_ : int = """down"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[str] = {
"in_channels": 32,
"out_channels": 32,
}
__A : Dict = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Tuple = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[str] = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase_ : Any = """down"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Optional[Any] = {
"in_channels": 32,
"out_channels": 32,
}
__A : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Union[str, Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = UNetMidBlockaD # noqa F405
lowerCamelCase_ : Optional[int] = """mid"""
def __UpperCAmelCase( self ):
__A : int = {
"in_channels": 32,
"temb_channels": 128,
}
__A : str = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Dict = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : str = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase_ : Tuple = """mid"""
def __UpperCAmelCase( self ):
__A : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__A : int = 32
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Any = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[str] = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase_ : Tuple = """mid"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__A : Optional[int] = 32
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Tuple = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = UpBlockaD # noqa F405
lowerCamelCase_ : List[Any] = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase_ : Dict = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Dict = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[str] = CrossAttnUpBlockaD # noqa F405
lowerCamelCase_ : List[str] = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Dict = super().prepare_init_args_and_inputs_for_common()
__A : List[str] = 32
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : List[Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase_ : Tuple = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase , include_encoder_hidden_states=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Tuple = super().prepare_init_args_and_inputs_for_common()
__A : int = 32
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Optional[int] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = AttnUpBlockaD # noqa F405
lowerCamelCase_ : Union[str, Any] = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __UpperCAmelCase( self ):
__A : Tuple = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = SkipUpBlockaD # noqa F405
lowerCamelCase_ : Optional[int] = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = AttnSkipUpBlockaD # noqa F405
lowerCamelCase_ : Dict = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Union[str, Any] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = UpDecoderBlockaD # noqa F405
lowerCamelCase_ : Dict = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[str] = {"in_channels": 32, "out_channels": 32}
__A : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : Any = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(__UpperCAmelCase )
class _a ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase_ : Tuple = """up"""
@property
def __UpperCAmelCase( self ):
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : int = {"in_channels": 32, "out_channels": 32}
__A : int = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase( self ):
__A : int = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(__UpperCAmelCase )
| 718
|
def solution(n: int = 2_000_000) -> int:
    # Sieve of Eratosthenes: 0 marks a potential prime, 1 marks a composite.
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
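A quick sanity check of the sieve at a small bound: the primes below 10 are 2, 3, 5 and 7, which sum to 17, and there are no primes strictly below 2.

assert solution(10) == 17
assert solution(2) == 0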
| 387
| 0
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
SCREAMING_SNAKE_CASE_ = MobileViTVaConfig()
SCREAMING_SNAKE_CASE_ = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
SCREAMING_SNAKE_CASE_ = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
SCREAMING_SNAKE_CASE_ = 384
else:
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
SCREAMING_SNAKE_CASE_ = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
SCREAMING_SNAKE_CASE_ = 384
else:
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
SCREAMING_SNAKE_CASE_ = 151
SCREAMING_SNAKE_CASE_ = 512
SCREAMING_SNAKE_CASE_ = '''ade20k-id2label.json'''
SCREAMING_SNAKE_CASE_ = True
elif task_name.startswith('''voc_''' ):
SCREAMING_SNAKE_CASE_ = 21
SCREAMING_SNAKE_CASE_ = 512
SCREAMING_SNAKE_CASE_ = '''pascal-voc-id2label.json'''
SCREAMING_SNAKE_CASE_ = True
# orig_config
SCREAMING_SNAKE_CASE_ = load_orig_config_file(__a )
assert getattr(__a, '''model.classification.name''', -1 ) == "mobilevit_v2", "Invalid model"
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.classification.mitv2.width_multiplier''', 1.0 )
assert (
getattr(__a, '''model.classification.mitv2.attn_norm_layer''', -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.classification.activation.name''', '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.segmentation.output_stride''', 16 )
if "_deeplabv3" in task_name:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.segmentation.deeplabv3.aspp_rates''', [12, 24, 36] )
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.segmentation.deeplabv3.aspp_out_channels''', 512 )
SCREAMING_SNAKE_CASE_ = getattr(__a, '''model.segmentation.deeplabv3.aspp_dropout''', 0.1 )
# id2label
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
    SCREAMING_SNAKE_CASE_ = {int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
for k in state_dict.keys():
if k[:8] == "encoder.":
SCREAMING_SNAKE_CASE_ = k[8:]
else:
SCREAMING_SNAKE_CASE_ = k
if ".block." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.block.''', '''.''' )
if ".conv." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.conv.''', '''.convolution.''' )
if ".norm." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.norm.''', '''.normalization.''' )
if "conv_1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''conv_1.''', F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(F'layer_{i}.', F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.exp_1x1.''', '''.expand_1x1.''' )
if ".red_1x1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.red_1x1.''', '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(F'layer_{i}.0.', F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(F'layer_{i}.1.local_rep.0.', F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(F'layer_{i}.1.local_rep.1.', F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
SCREAMING_SNAKE_CASE_ = [0, 1]
elif i == 4:
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 3]
elif i == 5:
SCREAMING_SNAKE_CASE_ = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(
F'layer_{i}.1.global_rep.{j}.', F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.', F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(F'layer_{i}.1.conv_proj.', F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''pre_norm_attn.0.''', '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''pre_norm_attn.1.''', '''attention.''' )
if "pre_norm_ffn.0." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''pre_norm_ffn.0.''', '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''pre_norm_ffn.1.''', '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''pre_norm_ffn.3.''', '''ffn.conv2.''' )
if "classifier.1." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''classifier.1.''', '''classifier.''' )
if "seg_head." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''seg_head.''', '''segmentation_head.''' )
if ".aspp_layer." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.aspp_layer.''', '''.''' )
if ".aspp_pool." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace('''.aspp_pool.''', '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, orig_checkpoint_path, orig_config_path, pytorch_dump_folder_path):
SCREAMING_SNAKE_CASE_ = get_mobilevitva_config(__a, __a )
# load original state_dict
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
SCREAMING_SNAKE_CASE_ = MobileViTVaForSemanticSegmentation(__a ).eval()
SCREAMING_SNAKE_CASE_ = False
else:
SCREAMING_SNAKE_CASE_ = MobileViTVaForImageClassification(__a ).eval()
SCREAMING_SNAKE_CASE_ = False
# remove and rename some keys of load the original model
SCREAMING_SNAKE_CASE_ = checkpoint
remove_unused_keys(__a )
SCREAMING_SNAKE_CASE_ = create_rename_keys(__a, base_model=__a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__a, __a, __a )
# load modified state_dict
model.load_state_dict(__a )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
SCREAMING_SNAKE_CASE_ = image_processor(images=prepare_img(), return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = model(**__a )
# verify classification model
if task_name.startswith('''imagenet''' ):
SCREAMING_SNAKE_CASE_ = outputs.logits
SCREAMING_SNAKE_CASE_ = logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3], __a, atol=1E-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
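The pop-and-reinsert idiom that `rename_key` applies per entry generalizes to any checkpoint conversion. A self-contained illustration with made-up keys:

import torch

state_dict = {"conv_1.weight": torch.zeros(2, 2), "classifier.1.bias": torch.zeros(2)}
rename_map = [("conv_1.weight", "conv_stem.weight"), ("classifier.1.bias", "classifier.bias")]
for old_key, new_key in rename_map:
    state_dict[new_key] = state_dict.pop(old_key)  # same tensor, new name
print(sorted(state_dict))  # ['classifier.bias', 'conv_stem.weight']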
| 626
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 626
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
torch.manual_seed(0)
        snake_case__ = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
snake_case__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0)
snake_case__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ = CLIPTextModel(UpperCamelCase__)
snake_case__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
snake_case__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=0):
'''simple docstring'''
if str(UpperCamelCase__).startswith("""mps"""):
snake_case__ = torch.manual_seed(UpperCamelCase__)
else:
snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
snake_case__ = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ ( self : Dict):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def __magic_name__ ( self : Tuple):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
snake_case__ = sag_pipe.to(UpperCamelCase__)
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__)
snake_case__ = """."""
snake_case__ = torch.manual_seed(0)
snake_case__ = sag_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""")
snake_case__ = output.images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def __magic_name__ ( self : Optional[Any]):
'''simple docstring'''
snake_case__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
snake_case__ = sag_pipe.to(UpperCamelCase__)
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__)
snake_case__ = """."""
snake_case__ = torch.manual_seed(0)
snake_case__ = sag_pipe(
[prompt] , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""")
snake_case__ = output.images
snake_case__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
snake_case__ = sag_pipe.to(UpperCamelCase__)
sag_pipe.set_progress_bar_config(disable=UpperCamelCase__)
snake_case__ = """."""
snake_case__ = torch.manual_seed(0)
snake_case__ = sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=UpperCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="""np""" , )
snake_case__ = output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3)
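The integration tests above all rely on the same idiom: slice a small corner of the generated image and compare it to hard-coded reference values. The pattern in isolation, with placeholder data standing in for a real pipeline output and reference:

import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for `output.images`
image_slice = image[0, -3:, -3:, -1]  # 3x3 patch of the last channel
expected_slice = np.zeros(9, dtype=np.float32)  # hypothetical stored reference
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2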
| 707
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
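A worked check of the routine above: x**3 - 2*x - 5 has its real root near 2.0945514815, and the halving loop terminates once the bracket is narrower than 1e-7, so the returned midpoint lands within that tolerance of the root.

assert abs(bisection(f, 1, 1000) - 2.0945514815) < 1e-6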
| 99
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 289
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
'''simple docstring'''
_UpperCAmelCase : int = 364 if "coco" in model_name else 224
_UpperCAmelCase : Dict = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_UpperCAmelCase : Any = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict()
elif "opt-6.7b" in model_name:
_UpperCAmelCase : List[Any] = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict()
elif "t5-xl" in model_name:
_UpperCAmelCase : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_UpperCAmelCase : int = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
_UpperCAmelCase : Optional[Any] = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE__ , text_config=SCREAMING_SNAKE_CASE__ )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
'''simple docstring'''
_UpperCAmelCase : str = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
_UpperCAmelCase : Optional[int] = tokenizer("\n" , add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids[0]
_UpperCAmelCase , _UpperCAmelCase : int = get_blipa_config(SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval()
_UpperCAmelCase : Optional[int] = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
_UpperCAmelCase , _UpperCAmelCase : str = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
_UpperCAmelCase : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE__ , model_type=SCREAMING_SNAKE_CASE__ , is_eval=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
original_model.eval()
print("Done!" )
# update state dict keys
_UpperCAmelCase : Optional[Any] = original_model.state_dict()
_UpperCAmelCase : Union[str, Any] = create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE__ )
if key.startswith("Qformer.bert" ):
_UpperCAmelCase : List[Any] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
_UpperCAmelCase : Dict = key.replace("self" , "attention" )
if "opt_proj" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
_UpperCAmelCase : Optional[Any] = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
_UpperCAmelCase : Dict = key.replace("opt" , "language" )
if key.startswith("t5" ):
_UpperCAmelCase : Union[str, Any] = key.replace("t5" , "language" )
_UpperCAmelCase : List[str] = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_UpperCAmelCase : Dict = load_demo_image()
_UpperCAmelCase : List[Any] = vis_processors["eval"](SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE__ )
# create processor
_UpperCAmelCase : Any = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=SCREAMING_SNAKE_CASE__ , image_std=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values.to(SCREAMING_SNAKE_CASE__ )
# make sure processor creates exact same pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
original_model.to(SCREAMING_SNAKE_CASE__ )
hf_model.to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
if "opt" in model_name:
_UpperCAmelCase : str = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
_UpperCAmelCase : Optional[int] = hf_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).logits
else:
_UpperCAmelCase : List[Any] = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
_UpperCAmelCase : List[str] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
_UpperCAmelCase : Union[str, Any] = hf_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_UpperCAmelCase : List[str] = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=SCREAMING_SNAKE_CASE__ )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_UpperCAmelCase : Any = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=SCREAMING_SNAKE_CASE__ )
else:
# cast to same type
_UpperCAmelCase : List[str] = logits.dtype
assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
print("Looks ok!" )
print("Generating a caption..." )
_UpperCAmelCase : int = ""
_UpperCAmelCase : Any = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = original_model.generate({"image": original_pixel_values} )
_UpperCAmelCase : int = hf_model.generate(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : str = input_ids.shape[1]
_UpperCAmelCase : List[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [text.strip() for text in output_text]
print("HF generation:" , SCREAMING_SNAKE_CASE__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
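# A hedged usage sketch (not part of the original script): loading a converted
# checkpoint for captioning. The local folder assumes the conversion above was run
# with --pytorch_dump_folder_path ./blip2-opt-2.7b (hypothetical path); the COCO
# image URL is only an illustration.
import requests
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

def caption_with_converted_checkpoint(folder="./blip2-opt-2.7b"):
    processor = Blip2Processor.from_pretrained(folder)
    model = Blip2ForConditionalGeneration.from_pretrained(folder)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=20)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()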
| 289
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class A ( ModelTesterMixin , unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input( self ):
        '''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input( self , seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape( self ):
'''simple docstring'''
return (4, 8)
@property
    def output_shape( self ):
'''simple docstring'''
return (4, 8)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature( self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
        model = model.to(torch_device )
        if hasattr(model , "set_default_attn_processor" ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Union[str, Any]=1 , _lowerCAmelCase: Optional[int]=768 , _lowerCAmelCase: Optional[Any]=77 , _lowerCAmelCase: Tuple=0 ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(_lowerCamelCase )
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =embedding_dim
UpperCAmelCase_ =num_embeddings
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
UpperCAmelCase_ =torch.randn((batch_size, embedding_dim) ).to(_lowerCamelCase )
UpperCAmelCase_ =torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior( self , seed , expected_slice ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
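# A minimal standalone sketch (same tiny hyperparameters as the dummy config used in
# the tests above): the prior maps a noisy image embedding, a timestep, a projected
# conditioning embedding, and encoder hidden states to a denoised image embedding.
prior = PriorTransformer(
    num_attention_heads=2 , attention_head_dim=4 , num_layers=2 ,
    embedding_dim=8 , num_embeddings=7 , additional_embeddings=4 ,
)
with torch.no_grad():
    out = prior(
        hidden_states=torch.randn(4 , 8 ) ,
        timestep=2 ,
        proj_embedding=torch.randn(4 , 8 ) ,
        encoder_hidden_states=torch.randn(4 , 7 , 8 ) ,
    ).predicted_image_embedding
print(out.shape )  # torch.Size([4, 8]) -- one predicted embedding per batch item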
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Union[str, Any] =logging.get_logger(__name__)
__lowercase : List[Any] ={
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class A ( PretrainedConfig ):
    model_type = '''gptsan-japanese'''
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , vocab_size=3_6000 , max_position_embeddings=1280 , d_model=1024 , d_ff=8192 , d_ext=4096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1e-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.0_02 , output_router_logits=False , use_cache=True , separator_token_id=3_5998 , pad_token_id=3_5995 , eos_token_id=3_5999 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
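# A minimal sketch using the config class defined above (the obfuscated counterpart of
# transformers' GPTSanJapaneseConfig): num_layers is derived from the switch and extra
# layer counts, and attribute_map exposes it under the standard name num_hidden_layers.
config = A(num_switch_layers=10 , num_ext_layers=2 )
print(config.num_layers )         # 12
print(config.num_hidden_layers )  # 12, resolved through PretrainedConfig's attribute_map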
| 550
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
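# A small illustrative sketch of the lazy-module pattern used above: attribute access
# triggers the real import, so importing the package stays cheap until a symbol is
# actually needed. ToyLazyModule is a simplified stand-in, not the actual _LazyModule.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        module_name = self._symbol_to_module.get(name)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # the heavy import happens only here, on first access
        module = importlib.import_module(module_name)
        return getattr(module, name)

lazy = ToyLazyModule("toy", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # 3.0 -- `math` was imported lazily on attribute access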
| 257
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        lowercase_ :Optional[Any] = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
        lowercase_ :int = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''base''' , keep_accents=True )
lowercase_ :Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ :Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ :List[str] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase_ :List[str] = tokenizer.vocab_size
lowercase_ :Union[str, Any] = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 4 , UpperCamelCase_ )]
self.assertListEqual(UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
lowercase_ :Dict = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase_ :Union[str, Any] = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
def UpperCamelCase ( self ):
        lowercase_ :Union[str, Any] = PLBartTokenizer(SAMPLE_VOCAB , language_codes='''multi''' , keep_accents=True )
lowercase_ :str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ :Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ :Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ :int = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
lowercase_ :Union[str, Any] = tokenizer.vocab_size
lowercase_ :Optional[Any] = [tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) for x in range(end - 7 , UpperCamelCase_ )]
self.assertListEqual(
UpperCamelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
lowercase_ :List[Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
lowercase_ :Any = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) , UpperCamelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] ="""uclanlp/plbart-python-en_XX"""
lowercase : Union[str, Any] =[
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
lowercase : Union[str, Any] =[
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
lowercase : int =[
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer :PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0003 )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase_ :Union[str, Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase_ :List[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase_ :Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , UpperCamelCase_ )
lowercase_ :Any = 10
lowercase_ :Union[str, Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0004, 5_0001] )
def UpperCamelCase ( self ):
lowercase_ :int = tempfile.mkdtemp()
lowercase_ :str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowercase_ :Tuple = PLBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowercase_ :List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , UpperCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase_ :List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowercase_ :Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
lowercase_ :int = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
lowercase_ :str = targets['''input_ids''']
lowercase_ :List[Any] = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 5_0003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0001,
} , )
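# A minimal sketch mirroring the integration tests above (downloads the
# uclanlp/plbart-python-en_XX checkpoint on first use): source code is encoded with
# the __python__ language code appended after </s>, matching the suffix_tokens
# assertion; the token ids in the comment are those asserted in the tests.
plbart_tok = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX" , src_lang="python" , tgt_lang="en_XX" )
enc = plbart_tok(["def add(a,b):NEW_LINE_INDENTreturn a+b"] , return_tensors="pt" )
print(enc.input_ids[0][-2:] )  # tensor([    2, 50002]) -> [</s>, __python__]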
| 257
| 1
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A_ : Any = logging.get_logger(__name__)
class _a (LayoutLMvaImageProcessor ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
super().__init__(*A__ , **A__ )
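# A small sketch of the deprecation-shim pattern above: the old class stays
# importable, warns once at construction, and otherwise behaves exactly like its
# replacement. The names below are illustrative, not from the library.
import warnings as _warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        _warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=384)  # warns, then works as before
print(extractor.size)  # 384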
| 64
|
def UpperCamelCase (first: int , second: int ) -> int:
    # Add two non-negative integers without `+`: the AND picks out the carry bits,
    # the XOR adds without carry, and the shifted carry is folded in next pass.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{UpperCamelCase(first, second) = }''')
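# Worked trace for UpperCamelCase(5, 3), showing how the carry propagates:
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0, first=0b1000, second=0
# Result: 0b1000 == 8 == 5 + 3.
assert UpperCamelCase(5, 3) == 8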
| 64
| 1
|
from __future__ import annotations
def snake_case (nth_term , power ) -> list[str]:
    # Build the P-series 1, 1/2^p, 1/3^p, ..., 1/n^p as display strings.
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else '1' )
    return series
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
    print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
    print(snake_case(nth_term, power))
| 57
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=7 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=99 ,_lowerCamelCase=64 ,_lowerCamelCase=5 ,_lowerCamelCase=4 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=512 ,_lowerCamelCase=16 ,_lowerCamelCase=2 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=3 ,_lowerCamelCase=4 ,_lowerCamelCase=None ,) -> Dict:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = vocab_size - 1
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCamelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForQuestionAnswering(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = GPTNeoXForTokenClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = True
__lowercase = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,use_cache=_lowerCamelCase )
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] ,dim=-1 )
__lowercase = model(_lowerCamelCase ,attention_mask=_lowerCamelCase ,output_hidden_states=_lowerCamelCase )
__lowercase = output_from_no_past['''hidden_states'''][0]
__lowercase = model(
_lowerCamelCase ,attention_mask=_lowerCamelCase ,past_key_values=_lowerCamelCase ,output_hidden_states=_lowerCamelCase ,)['''hidden_states'''][0]
# select random slice
__lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-3 ) )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a : Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
a : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
a : Dict = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Dict = False
a : Optional[Any] = False
a : Tuple = False
a : List[Any] = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = GPTNeoXModelTester(self )
        __lowercase = ConfigTester(self ,config_class=GPTNeoXConfig ,hidden_size=64 ,num_attention_heads=8 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ids_tensor([1, 10] ,config.vocab_size )
__lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = GPTNeoXModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
__lowercase = original_model(_lowerCamelCase ).last_hidden_state
__lowercase = original_model(_lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = {'''type''': scaling_type, '''factor''': 1_0.0}
__lowercase = GPTNeoXModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
__lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
__lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1E-5 ) )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__lowercase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCamelCase )
__lowercase = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_lowerCamelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__lowercase = model.generate(**_lowerCamelCase ,do_sample=_lowerCamelCase ,max_new_tokens=20 )
__lowercase = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
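# A minimal sketch of the rope_scaling knob exercised by the parameterized test above:
# GPTNeoXConfig accepts {"type": "linear" | "dynamic", "factor": > 1.0}, and the model
# then interpolates (or dynamically rescales) its rotary position embeddings.
small_config = GPTNeoXConfig(
    vocab_size=99 , hidden_size=64 , num_hidden_layers=2 , num_attention_heads=8 ,
    intermediate_size=128 , max_position_embeddings=512 ,
    rope_scaling={"type": "linear", "factor": 10.0} ,
)
scaled_model = GPTNeoXModel(small_config )  # handles positions beyond 512 by interpolation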
| 502
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class _snake_case ( BenchmarkArguments ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self ,**kwargs ) -> Any:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        self.torchscript = kwargs.pop("torchscript" ,self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics" ,self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("fp16_opt_level" ,self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    fp16_opt_level: str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self ,["torch"] )
        logger.info("PyTorch: setting up devices" )
        if not self.cuda:
            device = torch.device("cpu" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
@property
def lowerCAmelCase_ ( self ) -> str:
return is_torch_tpu_available() and self.tpu
@property
def lowerCAmelCase_ ( self ) -> Dict:
requires_backends(self ,["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowerCAmelCase_ ( self ) -> Optional[Any]:
requires_backends(self ,["torch"] )
return self._setup_devices[0]
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
requires_backends(self ,["torch"] )
return self._setup_devices[1]
@property
def lowerCAmelCase_ ( self ) -> Any:
return self.n_gpu > 0
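# A minimal sketch of the legacy-flag rewrite performed in __init__ above: a
# deprecated no_* keyword is translated (with a warning) to its positive counterpart
# before the parent dataclass initializer runs, so both spellings set the same field.
args = _snake_case(models=["bert-base-uncased"] , no_cuda=True )
print(args.cuda )  # False -- derived from the deprecated no_cuda flag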
| 719
|
def lowercase_ ( sequence : list ) -> list:
    '''Bead sort (gravity sort) for a list of non-negative integers.'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( FeatureExtractionMixin ):
def __init__( self ,**_A ):
'''simple docstring'''
requires_backends(self ,['bs4'] )
super().__init__(**_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_lowerCAmelCase : List[str] = parent.find_all(child.name ,recursive=_A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_A ) else next(i for i, s in enumerate(_A ,1 ) if s is child ) )
_lowerCAmelCase : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BeautifulSoup(_A ,'html.parser' )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = []
for element in html_code.descendants:
if type(_A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_lowerCAmelCase : Dict = html.unescape(_A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_A )
_lowerCAmelCase, _lowerCAmelCase : Dict = self.xpath_soup(_A )
stringaxtag_seq.append(_A )
stringaxsubs_seq.append(_A )
if len(_A ) != len(_A ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(_A ) != len(_A ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ''
for tagname, subs in zip(_A ,_A ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = False
# Check that strings has a valid type
if isinstance(_A ,_A ):
_lowerCAmelCase : Union[str, Any] = True
elif isinstance(_A ,(list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] ,_A ):
_lowerCAmelCase : Any = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
F"""but is of type {type(_A )}.""" )
_lowerCAmelCase : List[str] = bool(isinstance(_A ,(list, tuple) ) and (isinstance(html_strings[0] ,_A )) )
if not is_batched:
_lowerCAmelCase : str = [html_strings]
# Get nodes + xpaths
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = []
for html_string in html_strings:
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = self.get_three_from_single(_A )
nodes.append(_A )
_lowerCAmelCase : Any = []
for node, tag_list, sub_list in zip(_A ,_A ,_A ):
_lowerCAmelCase : str = self.construct_xpath(_A ,_A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
_lowerCAmelCase : Optional[int] = {'nodes': nodes, 'xpaths': xpaths}
_lowerCAmelCase : List[Any] = BatchFeature(data=_A ,tensor_type=_A )
return encoded_inputs
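# A minimal sketch, assuming the class above is the obfuscated counterpart of
# transformers' MarkupLMFeatureExtractor (beautifulsoup4 must be installed): the
# extractor returns every text node of the page together with its XPath.
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html_string )
print(encoding["nodes"] )   # [['Title', 'Hello world']]
print(encoding["xpaths"] )  # [['/html/body/h1', '/html/body/p']]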
| 259
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Dict = seq_length
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : Tuple = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Tuple = scope
_lowerCAmelCase : int = self.vocab_size - 1
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
_lowerCAmelCase : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = OpenAIGPTModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Dict = model(_A ,token_type_ids=_A ,head_mask=_A )
_lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A )
_lowerCAmelCase : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = OpenAIGPTLMHeadModel(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ):
'''simple docstring'''
_lowerCAmelCase : str = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[int] = model(_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.num_labels
_lowerCAmelCase : Any = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
), (
_lowerCAmelCase
), (
_lowerCAmelCase
), (
_lowerCAmelCase
), (
_lowerCAmelCase
), (
_lowerCAmelCase
), (
_lowerCAmelCase
),
) : int = config_and_inputs
_lowerCAmelCase : Any = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_UpperCAmelCase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def __lowerCamelCase ( self ,pipeline_test_casse_name ,_A ,_A ,_A ,_A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __lowerCamelCase ( self ,_A ,_A ,_A=False ):
'''simple docstring'''
_lowerCAmelCase : int = super()._prepare_for_class(_A ,_A ,return_labels=_A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowerCAmelCase : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_A ,)
_lowerCAmelCase : int = inputs_dict['labels']
_lowerCAmelCase : Tuple = inputs_dict['labels']
_lowerCAmelCase : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_A ,)
_lowerCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = OpenAIGPTModelTester(self )
        _lowerCAmelCase : Union[str, Any] = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[Any] = OpenAIGPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_A )
_lowerCAmelCase : Any = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_A ) # the president is
_lowerCAmelCase : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_lowerCAmelCase : str = model.generate(_A ,do_sample=_A )
self.assertListEqual(output_ids[0].tolist() ,_A )
| 259
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg ,hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] ,hint )
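# A minimal sketch of what the loop above enforces at import time: require_version
# parses a pip-style specifier, compares it against the installed distribution, and
# raises (including the optional hint in the message) when the pin is not satisfied.
from .utils.versions import require_version as _require_version

_require_version("numpy>=1.17" )  # returns silently when the pin is satisfied
# _require_version("numpy<0.1" , "Try: pip install -U numpy" )  # would raise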
| 704
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 481
| 0
|
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
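# Usage sketch: kinetic energy is (1/2) * m * |v|**2, so the sign of the
# velocity does not matter:
#
#     kinetic_energy(10, 10)   # 500.0
#     kinetic_energy(10, -10)  # 500.0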
| 108
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 568
| 0
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 432
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 432
| 1
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 479
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 597
| 0
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCAmelCase = "CompVis/stable-diffusion-v1-1"
__lowerCAmelCase = "CompVis/stable-diffusion-v1-2"
__lowerCAmelCase = "CompVis/stable-diffusion-v1-3"
__lowerCAmelCase = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe1(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe2(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe3(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe4(prompt=prompt, **kwargs)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs,
        )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, **kwargs,
        )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
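# Usage sketch (illustrative only; the constructor needs the components of one
# checkpoint, shown here borrowed from the v1-4 pipeline):
#
#     pipe = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
#     comparison = StableDiffusionComparisonPipeline(
#         vae=pipe.vae, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer,
#         unet=pipe.unet, scheduler=pipe.scheduler, safety_checker=pipe.safety_checker,
#         feature_extractor=pipe.feature_extractor,
#     )
#     images = comparison("an astronaut riding a horse").images  # one image per checkpoint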
| 711
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 129
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 108
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 514
| 0
|
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
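# Usage sketch: each call scrapes the live counters, so values change between
# runs; the result is a namedtuple with `cases`, `deaths` and `recovered`:
#
#     stats = covid_stats()
#     print(stats.cases, stats.deaths, stats.recovered)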
| 265
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
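# Usage sketch (assumes an active SparkSession named `spark`): wrap an existing
# DataFrame and materialize it as a Hugging Face dataset.
#
#     reader = SparkDatasetReader(spark.createDataFrame([("hello",), ("world",)], ["text"]))
#     ds = reader.read()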
| 265
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
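# Usage sketch: downstream code grabs a module logger and adjusts the
# library-wide verbosity through the helpers above.
#
#     from transformers.utils import logging
#     logging.set_verbosity_info()
#     logger = logging.get_logger("transformers")
#     logger.info("visible at INFO level")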
| 684
|
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
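# Example sketch: wrapping a sentence to a width of 16; the final line is
# left-justified and padded with trailing spaces.
#
#     text_justification("This is an example of text justification.", 16)
#     # ['This    is    an', 'example  of text', 'justification.  ']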
| 248
| 0
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for a given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 703
|
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
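# Quick usage sketch on a 5-vertex graph known to contain a Hamiltonian cycle:
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]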
| 232
| 0
|
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
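# Usage sketch: 100 km/h converts to roughly 27.778 m/s and 62.137 mph:
#
#     convert_speed(100, "km/h", "m/s")  # 27.778
#     convert_speed(100, "km/h", "mph")  # 62.137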
| 242
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =arg_to_scheduler[self.hparams.lr_scheduler]
__a =get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__a ={'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co")
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config")
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler")
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether the model parameters are None or not
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ))
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
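
# --- Hedged usage sketch (illustrative, not part of the original script) ---
# `MyTaskModel` and the argument values below are hypothetical placeholders,
# assuming a BaseTransformer subclass that implements get_dataloader(),
# training_step() and validation_step():
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = MyTaskModel.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args(["--model_name_or_path", "facebook/rag-token-base", "--do_train"])
#     model = MyTaskModel(args, mode="summarization")
#     trainer = generic_train(model, args)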
| 242
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page Replacement Algorithm: Least Recently Used (LRU) caching."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys; the cache is set to size n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Look for a page in the cache store; evict the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all the elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
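
# Why the assertion holds (illustrative walk-through): with capacity 4, the
# reference order 'A', 2, 3, 'A', 4 leaves the store as [4, 'A', 3, 2];
# referring 5 then evicts the least recently used key (2) from the right end
# and pushes 5 onto the left, giving [5, 4, 'A', 3].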
| 271
| 0
|
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config

logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
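
# Quick sanity check (illustrative, not part of the original file): for a
# 1x3x7x7 input and a 3x3 convolution with stride 2, pad_along_height =
# pad_along_width = max(3 - (7 % 2), 0) = 2, split as one pixel per side:
#
#     conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
#     x = torch.randn(1, 3, 7, 7)
#     apply_tf_padding(x, conv).shape  # torch.Size([1, 3, 9, 9])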
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
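
# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Standard image-classification flow with the checkpoint referenced in the
# docstrings above; "cat.png" is a placeholder image path:
#
#     from transformers import AutoImageProcessor
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])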
| 89
|
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to torch.float16 for smaller files and faster downloads."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
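
# --- Hedged usage sketch (illustrative) ---
# fire maps the function signature onto the CLI; assuming this file is saved
# as convert_model_to_fp16.py (a hypothetical name), a call would look like:
#
#     python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin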
| 418
| 0
|
import math


def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
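
# Worked check: the first six primes are 2, 3, 5, 7, 11, 13, so solution(6)
# returns 13; the default solution() returns the 10001st prime, 104743
# (Project Euler problem 7).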
| 171
|
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
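
# --- Hedged usage sketch (illustrative; script and checkpoint paths are placeholders) ---
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path path/to/checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer-base-4096-qa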
| 171
| 1
|
def counting_sort(collection):
    """Sorts a list of integers with counting sort."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sorts the characters of a string by applying counting_sort to their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
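
# Worked example: counting_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5] in
# O(n + k) time, where k = max - min + 1 is the value range.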
| 276
|
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer

logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run"
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        onnx_file_path = args.output_file_path
    else:
        onnx_file_path = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length)


if __name__ == "__main__":
    main()
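
# --- Hedged usage sketch (illustrative; the script name is a placeholder) ---
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path BART.onnx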
| 276
| 1
|
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs a MCTCT processor which wraps a MCTCT feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's [`~PreTrainedTokenizer.batch_decode`]."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's [`~PreTrainedTokenizer.decode`]."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
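
# --- Hedged usage sketch (illustrative, not part of the original file) ---
# A processor call typically pairs raw audio with optional label text;
# `raw_audio` (a 1-D float array) and the checkpoint name are placeholders:
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     batch = processor(audio=raw_audio, sampling_rate=16000, text="hello world", return_tensors="pt")
#     # batch holds "input_features" from the feature extractor and "labels" from the tokenizer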
| 716
|
def cramers_rule_2x2(equation1, equation2):
    """Solves a system of two linear equations, each given as [a, b, d] for ax + by = d."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
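
# Worked example (illustrative, not from the original file): for the system
# x + y = 3 and x - y = 1, determinant = (1)(-1) - (1)(1) = -2,
# determinant_x = (3)(-1) - (1)(1) = -4 and determinant_y = (1)(1) - (1)(3) = -2,
# so x = -4 / -2 = 2.0 and y = -2 / -2 = 1.0:
#
#     cramers_rule_2x2([1, 1, 3], [1, -1, 1])  # -> (2.0, 1.0)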
| 148
| 0
|
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99
|
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct a MGP-STR char tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 459
| 0
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : str=False , **_UpperCAmelCase : Optional[Any] ):
super().__init__(**_A )
_A = vocab_size
_A = d_embed
_A = d_proj
_A = cutoffs + [vocab_size]
_A = [0] + self.cutoffs
_A = div_val
_A = self.cutoffs[0]
_A = len(self.cutoffs ) - 1
_A = self.shortlist_size + self.n_clusters
_A = keep_order
_A = []
_A = []
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict ):
if self.n_clusters > 0:
_A = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_A , name='cluster_weight' )
_A = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=_A , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_A = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_A , name=F'''out_projs_._{i}''' , )
self.out_projs.append(_A )
else:
self.out_projs.append(_A )
_A = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_A , name=F'''out_layers_._{i}_._weight''' , )
_A = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=_A , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.d_embed // (self.div_val**i)
_A = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_A , name=F'''out_projs_._{i}''' )
self.out_projs.append(_A )
_A = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_A , name=F'''out_layers_._{i}_._weight''' , )
_A = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_A , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(_A )
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=None ):
_A = x
if proj is not None:
_A = tf.einsum('ibd,ed->ibe' , _A , _A )
return tf.einsum('ibd,nd->ibn' , _A , _A ) + b
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ):
_A = shape_list(_A )
_A = tf.range(lp_size[0] , dtype=target.dtype )
_A = tf.stack([r, target] , 1 )
return tf.gather_nd(_A , _A )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=False ):
_A = 0
if self.n_clusters == 0:
_A = self._logit(_A , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_A = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_A , logits=_A )
_A = tf.nn.log_softmax(_A , axis=-1 )
else:
_A = shape_list(_A )
_A = []
_A = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_A = (target >= l_idx) & (target < r_idx)
_A = tf.where(_A )
_A = tf.boolean_mask(_A , _A ) - l_idx
if self.div_val == 1:
_A = self.out_layers[0][0][l_idx:r_idx]
_A = self.out_layers[0][1][l_idx:r_idx]
else:
_A = self.out_layers[i][0]
_A = self.out_layers[i][1]
if i == 0:
_A = tf.concat([cur_W, self.cluster_weight] , 0 )
_A = tf.concat([cur_b, self.cluster_bias] , 0 )
_A = self._logit(_A , _A , _A , self.out_projs[0] )
_A = tf.nn.log_softmax(_A )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_A = tf.boolean_mask(_A , _A )
_A = self._gather_logprob(_A , _A )
else:
_A = self._logit(_A , _A , _A , self.out_projs[i] )
_A = tf.nn.log_softmax(_A )
_A = self.cutoffs[0] + i - 1 # No probability for the head cluster
_A = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_A )
if target is not None:
_A = tf.boolean_mask(_A , _A )
_A = tf.boolean_mask(_A , _A )
_A = self._gather_logprob(_A , _A )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_A , -cur_logprob , shape_list(_A ) )
_A = tf.concat(_A , axis=-1 )
if target is not None:
if return_mean:
_A = tf.reduce_mean(_A )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_A )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_A , name=self.name , aggregation='mean' if return_mean else '' )
return out
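
# Illustrative note (not from the original file): with vocab_size=10 and
# cutoffs=[5], tokens 0-4 form the frequent-word "head" (scored together with
# one cluster logit per tail cluster), while tokens 5-9 are scored by a
# smaller "tail" softmax; with div_val=2 the tail uses d_embed // 2
# dimensions, which is what makes the softmax "adaptive".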
| 714
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _snake_case ( _snake_case : List[Any] ) -> Any:
'''simple docstring'''
_A = {}
_A = tokenizer(example['content'] , truncation=_snake_case )['input_ids']
_A = len(example['content'] ) / len(output['input_ids'] )
return output
a = HfArgumentParser(PretokenizationArguments)
a = parser.parse_args()
if args.num_workers is None:
a = multiprocessing.cpu_count()
a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a = time.time()
a = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a = time.time()
a = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 505
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool

if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
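
# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Assuming the standard PipelineTool call flow (encode -> forward -> decode):
#
#     tool = TextToSpeechTool()
#     audio = tool("A cup of coffee, please.")  # 1-D waveform tensor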
| 166
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166
| 1
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of the queues that the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes are in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished: deque[Process] = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of the current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to the finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished: deque[Process] = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes go back into the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only the time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # place the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to the finish queue
        # return the finished processes queue and the remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first come, first served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
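# Worked example (hand-computed for the demo below; all arrival times are 0):
# Q1 (RR, slice 17): P1 runs 0-17, P2 17-34 (done), P3 34-51, P4 51-68.
# Q2 (RR, slice 25): P1 runs 68-93, P3 93-118, P4 118-125 (done).
# Q3 (FCFS):         P1 runs 125-136 (done), P3 136-162 (done).
# Completion times are therefore P1=136, P2=34, P3=162, P4=125, and the
# waiting times (turnaround - burst) come out to 83, 17, 94 and 101.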
if __name__ == "__main__":
import doctest
_UpperCamelCase : Optional[Any] =Process('P1', 0, 53)
_UpperCamelCase : List[Any] =Process('P2', 0, 17)
_UpperCamelCase : Any =Process('P3', 0, 68)
_UpperCamelCase : Any =Process('P4', 0, 24)
_UpperCamelCase : Dict =3
_UpperCamelCase : Dict =[17, 25]
_UpperCamelCase : Any =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_UpperCamelCase : Any =Process('P1', 0, 53)
_UpperCamelCase : Any =Process('P2', 0, 17)
_UpperCamelCase : Union[str, Any] =Process('P3', 0, 68)
_UpperCamelCase : Dict =Process('P4', 0, 24)
_UpperCamelCase : str =3
_UpperCamelCase : int =[17, 25]
_UpperCamelCase : List[Any] =deque([Pa, Pa, Pa, Pa])
_UpperCamelCase : Optional[int] =MLFQ(number_of_queues, time_slices, queue, 0)
_UpperCamelCase : Any =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 713
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
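# A minimal sketch (hypothetical values, not part of the original file): the
# config can be built directly, and the ONNX config derives its axis mapping
# from the chosen task.
#
#     config = XmodConfig(vocab_size=30_522, languages=("en_XX", "de_DE"))
#     onnx_config = XmodOnnxConfig(config, task="default")
#     print(onnx_config.inputs)  # OrderedDict for input_ids / attention_mask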
| 332
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 322
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 322
| 1
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 718
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
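# A minimal usage sketch (the checkpoint name and `image` are illustrative, not
# part of this file):
#
#     from transformers import ChineseCLIPProcessor
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#     # inputs now carries both input_ids/attention_mask and pixel_values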
| 392
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """

    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
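# Example invocation (the script filename and output path are illustrative):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224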
| 588
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our VisualBERT structure.
    """

    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
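# Example invocation (the script filename and paths are illustrative):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa-pre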
| 588
| 1
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide a number of bytes into the given number of partitions and return the
    resulting byte ranges, e.g. for a multi-part download.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
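# Example (hand-checked): allocation_num(100, 4) returns
# ['1-25', '26-50', '51-75', '76-100'] -- the last partition absorbs any
# remainder, so the ranges always cover exactly number_of_bytes bytes.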
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
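# Example (illustrative; actual output depends on the contents of words.txt):
# sorted("stop") == "opst", so "stop", "pots", "spot", "tops" and "opts" all
# share one signature bucket and are reported as anagrams of each other.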
| 283
| 0
|
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 7
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__a = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
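# A minimal usage sketch (the checkpoint name is illustrative): the source and
# target languages drive which special tokens wrap each sequence.
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # batch.input_ids starts with the eng_Latn language code and ends with </s>
#     # (unless legacy_behaviour is set, which moves the code after </s>).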
| 374
| 0
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 332
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_UpperCamelCase : str =parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
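# Example invocation (paths are illustrative):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base.81.1M/model.pt \
#       --pytorch_dump_folder_path ./xmod-base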
| 332
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 88
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vq_model = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vq_model, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
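

# Usage sketch outside the test harness (assumes diffusers is installed and the public
# "CompVis/ldm-celebahq-256" checkpoint is reachable); it mirrors the slow test above.
if __name__ == "__main__":
    pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    pipeline.set_progress_bar_config(disable=None)
    sample = pipeline(generator=torch.manual_seed(0), num_inference_steps=5, output_type="numpy").images
    print(sample.shape)  # expected: (1, 256, 256, 3)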
| 412
| 0
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that returns the wall-clock time a call took instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
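

# Usage sketch (assumes a writable ./dummy.arrow path): build a tiny two-column dataset
# with the helpers above; `get_duration` can then time repeated reads over it.
if __name__ == "__main__":
    demo_features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    demo_dataset = generate_example_dataset("./dummy.arrow", demo_features, num_examples=10)
    print(len(demo_dataset))  # 10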
| 704
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
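

# Offline sketch of the selection logic above (self-contained, no network needed): the
# citation count is the text of the third anchor in the "gs_fl" footer of a result block.
if __name__ == "__main__":
    _demo_html = '<div class="gs_ri"><div class="gs_fl"><a>Save</a><a>Cite</a><a>Cited by 42</a></div></div>'
    _demo_div = BeautifulSoup(_demo_html, "html.parser").find("div", attrs={"class": "gs_ri"})
    assert _demo_div.find("div", attrs={"class": "gs_fl"}).find_all("a")[2].get_text() == "Cited by 42"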
| 396
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
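

# Behavior sketch with hypothetical token ids (101/102 stand in for [CLS]/[SEP]; no
# vocab file is needed for the arithmetic): segment 0 plus [CLS]/[SEP] maps to 0s and
# segment 1 plus its trailing [SEP] maps to 1s, as in create_token_type_ids_from_sequences.
if __name__ == "__main__":
    ids_0, ids_1 = [5, 6], [7, 8, 9]
    token_type_ids = len([101] + ids_0 + [102]) * [0] + len(ids_1 + [102]) * [1]
    assert token_type_ids == [0, 0, 0, 0, 1, 1, 1, 1]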
| 541
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
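

# Miniature illustration of the lazy-import idea (standalone sketch; uses the stdlib
# "json" module as a stand-in target, not transformers code): attribute access triggers
# the real import only on first use, which is what _LazyModule does for heavy submodules.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __getattr__(self, name):
        return getattr(importlib.import_module("json"), name)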
| 144
| 0
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to ``target``, or an empty list if no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
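

# For contrast (an addition, not part of the original file): when the input is unsorted,
# a one-pass dict lookup finds a pair in O(n) time at the cost of O(n) extra space.
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for index, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], index]
        seen[value] = index
    return []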
| 710
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) roman numeral string into an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
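

# Quick sanity check of the round trip (illustrative addition, values verified by hand):
# the non-minimal "XIIIIII" parses to 16, which regenerates as the minimal "XVI", a
# saving of 4 characters -- exactly what solution() accumulates across the input file.
assert parse_roman_numerals("XIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"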
| 379
| 0
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
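

# Usage sketch of the post-processing exercised above (assumes transformers is installed
# and the public checkpoint is reachable): the predicted map is resized back to the
# requested target size.
if __name__ == "__main__":
    demo_processor = SegformerImageProcessor()
    demo_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    demo_inputs = demo_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        demo_outputs = demo_model(**demo_inputs)
    demo_seg = demo_processor.post_process_semantic_segmentation(demo_outputs, target_sizes=[(500, 300)])[0]
    print(demo_seg.shape)  # torch.Size([500, 300])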
| 219
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
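

# Usage sketch of the prompt-embeds path tested above (assumes the tiny test checkpoint
# is reachable): precomputed text embeddings can replace the raw prompt string.
if __name__ == "__main__":
    demo_pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    demo_ids = demo_pipe.tokenizer(
        ["a photo of an astronaut"],
        padding="max_length",
        max_length=demo_pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="np",
    )["input_ids"]
    demo_embeds = demo_pipe.text_encoder(input_ids=demo_ids.astype(np.int32))[0]
    demo_out = demo_pipe(prompt_embeds=demo_embeds, num_inference_steps=2, output_type="numpy")
    print(demo_out.images.shape)  # (1, 128, 128, 3) for the tiny checkpoint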
| 219
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
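
# Example invocation (paths are placeholders):
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted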
| 123
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
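

# Concrete-subclass sketch (hypothetical "hello" command, illustrative only; the parser
# passed in is the sub-parsers action, as in the transformers CLI entry point):
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")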
| 123
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
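
# Example invocation (model identifiers are placeholders):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated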
| 29
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
    if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
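
# Example invocation (paths are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5-diffusers \
#       --extract_ema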
| 156
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def __snake_case ( ) -> Tuple:
"""simple docstring"""
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
A = import_module('tasks' )
try:
A = getattr(UpperCamelCase__ , model_args.task_type )
A = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
A = token_classification_task.get_labels(data_args.labels )
A = dict(enumerate(UpperCamelCase__ ) )
A = len(UpperCamelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , cache_dir=model_args.cache_dir , )
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
A = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
A = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
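    # Illustrative example (hypothetical values): with label_map = {0: "O", 1: "B-LOC"} and
    # label_ids = [[0, -100]], the second position is skipped because
    # nn.CrossEntropyLoss().ignore_index == -100, so each returned inner list has length 1.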
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
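# Example invocation (illustrative; the script name, paths, task type and model are placeholders):
#   python run_ner.py --task_type NER --data_dir ./data --labels ./data/labels.txt \
#     --model_name_or_path bert-base-cased --output_dir ./ner-out --do_train --do_eval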
| 91
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report twice: once in GitHub format with all the information that needs to be
# shared with others, and a second time in a console-friendly format that is easier to use when tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the
# trainer 6 times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
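#
# For illustration, that cartesian product is exactly what itertools.product computes
# (the same call this script uses further below to build the variations):
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   >>> [" ".join(v) for v in itertools.product(*dims)]
#   ['--tf32 0 --fp16 0', '--tf32 0 --fp16 1', '--tf32 0 --bf16 1',
#    '--tf32 1 --fp16 0', '--tf32 1 --fp16 1', '--tf32 1 --bf16 1']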
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to set each variation explicitly:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times via --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline (100%) and compare the rest to it, as can
# be seen in the table above, but you can also specify which combination to use as the baseline,
# e.g., to switch to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""
    def __init__(self , filename ):
        self.stdout = sys.stdout
        self.file = open(filename , 'a' )
    def __getattr__(self , attr ):
        return getattr(self.stdout , attr )
    def write(self , msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r' , '' , msg , 0 , re.M ) )
def get_original_command(max_width=80 , full_python_path=False ):
    """Return the original command line string, wrapped with shell escapes to max_width chars per line."""
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'{key}={val}' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd ) > 0:
        current_line += f'{cmd.pop(0 )} '
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+' , ' ' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+' , '' , args.base_cmd )
    args.base_cmd += f' --output_dir {output_dir}'
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+' , '' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
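# For example (illustrative), with args.base_cmd == 'run.py --output_dir old' and
# output_dir == 'new', get_base_command returns:
#   [sys.executable, 'run.py', '--output_dir', 'new', '--overwrite_output_dir']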
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # enable the stub below to debug this function without running the actual benchmark
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('STDOUT' , result.stdout )
        print('STDERR' , result.stderr )
    # save the streams
    prefix = variation.replace(' ' , '-' )
    with open(Path(output_dir ) / f'log.{prefix}.stdout.txt' , 'w' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'log.{prefix}.stderr.txt' , 'w' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('failed' )
        return {target_metric_key: nan}
    with io.open(f'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'{outcome} {mean_target}'
        if len(results ) > 1:
            results_str += f' {tuple(round(x , 2 ) for x in results )}'
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device('cuda' ) )
    return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = 'variation'
    diff_key = 'diff_%'
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='columns' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='columns' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='columns' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('_' , '<br>' ) , axis='columns' )
    df_console = df.rename(lambda c: c.replace('_' , '\n' ) , axis='columns' )
    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt='.2f' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt='.2f' )]
print('\n\n'.join(UpperCamelCase__ ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd' , default=None , type=str , required=True , help='Base cmd' , )
    parser.add_argument(
        '--variations' , default=None , type=str , nargs='+' , required=True , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
    parser.add_argument(
        '--base-variation' , default=None , type=str , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
    parser.add_argument(
        '--target-metric-key' , default=None , type=str , required=True , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
    parser.add_argument(
        '--report-metric-keys' , default='' , type=str , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples_per_second\'' , )
    parser.add_argument(
        '--repeat-times' , default=1 , type=int , help='How many times to re-run each variation - an average will be reported' , )
    parser.add_argument(
        '--output_dir' , default='output_benchmark' , type=str , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
    parser.add_argument(
        '--verbose' , default=False , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r'\|' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(' '.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(f'and this script\'s output is also piped into {report_fn}' )
    sys.stdout = Tee(report_fn )
    print(f'\n*** Running {len(variations )} benchmarks:' )
    print(f'Base command: {" ".join(base_cmd )}' )
    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations , desc='Total completion: ' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 91
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __a :
def __init__( self : str ,lowerCamelCase : List[Any] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 99
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 37
__SCREAMING_SNAKE_CASE = """gelu"""
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0.02
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
__SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Any ,lowerCamelCase : List[str] ,lowerCamelCase : Any ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDistilBertModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDistilBertForMaskedLM(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Any ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDistilBertForQuestionAnswering(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Any ,lowerCamelCase : Any ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFDistilBertForSequenceClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = TFDistilBertForMultipleChoice(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Any ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFDistilBertForTokenClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : int = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCamelCase : Any = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDistilBertModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,dim=37 )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__SCREAMING_SNAKE_CASE = TFDistilBertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_tf
class __a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )[0]
__SCREAMING_SNAKE_CASE = [1, 6, 768]
self.assertEqual(output.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,lowerCamelCase ,atol=1E-4 )
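# To run this test file locally, the usual transformers pytest invocation applies (path assumed):
#   python -m pytest tests/models/distilbert/test_modeling_tf_distilbert.py -q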
| 109
|
"""simple docstring"""
from string import ascii_uppercase
lowerCAmelCase__ = {str(ord(c) - 55): c for c in ascii_uppercase}
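# e.g. ALPHABET_VALUES["10"] == "A" and ALPHABET_VALUES["35"] == "Z" (since ord("A") - 55 == 10)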
def decimal_to_any(num: int , base: int ) -> str:
    '''Convert a positive decimal integer to its representation in any base from 2 to 36.'''
    if isinstance(num , float ):
        raise TypeError("int() can't convert non-string with explicit base" )
    if num < 0:
        raise ValueError("parameter must be positive int" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if isinstance(base , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if base in (0, 1):
        raise ValueError("base must be >= 2" )
    if base > 36:
        raise ValueError("base must be <= 36" )
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
    return new_value[::-1]
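# e.g. decimal_to_any(255, 16) == "FF" and decimal_to_any(5, 2) == "101"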
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 645
| 0
|
"""simple docstring"""
def depth_first_search(grid: list , row: int , col: int , visit: set ) -> int:
    """Count the unique simple paths from the top-left to the bottom-right cell of a 0/1 grid."""
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
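# Example: a fully open 2x2 grid has exactly two simple paths from (0, 0) to (1, 1):
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2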
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
@register_to_config
    def __init__(self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.float32 , ):
        self.dtype = dtype
    def create_state(self , common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype)
        timesteps = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input(self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance(self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1)
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype)
            return (self._get_variance(state , t , predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state)
    def add_noise(self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps)
    def get_velocity(self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps)
    def __len__(self):
        return self.config.num_train_timesteps
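# A minimal usage sketch (not part of the original file; `model_output` and `sample` are assumed
# to come from a UNet forward pass):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
#   sample, state = out.prev_sample, out.state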
| 487
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
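# Example invocation (illustrative; the script name, task name and paths are placeholders):
#   python run_multiple_choice.py --task_name swag --data_dir ./swag-data \
#     --model_name_or_path bert-base-uncased --output_dir ./mc-out --do_train --do_eval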
| 10
|
import sys
from collections import defaultdict
class Heap:
    def __init__(self ):
        self.node_position = []
    def get_position(self , vertex ):
        return self.node_position[vertex]
    def set_position(self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom(self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp , tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child] , positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start] , positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] , self.get_position(positions[start] ) )
            self.set_position(positions[start] , temp )
            self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top(self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify(self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum(self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prims_algorithm(adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
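# Example (illustrative): for a weighted triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3),
#   adjacency = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prims_algorithm(adjacency)  # expected MST edges: [(0, 1), (1, 2)]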
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
| 10
| 1
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
"""simple docstring"""
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
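        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = 15**2 = 225 and seq_length = ceil(0.4 * 226) = 91 visible tokens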
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
snake_case_ = (self.image_size // self.patch_size) ** 2
snake_case_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase )
snake_case_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__A = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ViTMAEModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2 )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
snake_case_ = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowy and brightly lit night, with many bright buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
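
# Hedged usage sketch (the script filename is hypothetical; 'path-to-your-trained-model'
# above is a placeholder for your own fine-tuned checkpoint directory):
#   python stable_diffusion_ipex_inference.py --dpm --steps 20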
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ = " " ) ->list:
"""simple docstring"""
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Union[str, Any] = 0
for index, char in enumerate(UpperCAmelCase_ ):
if char == separator:
split_words.append(string[last_index:index] )
__UpperCAmelCase : List[Any] = index + 1
elif index + 1 == len(UpperCAmelCase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
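
# Hedged usage sketch:
#   split("apple#banana#cherry#orange", separator="#")  ->  ['apple', 'banana', 'cherry', 'orange']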
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements that should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
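
# Hedged usage sketch (assumes a local image file; the path is hypothetical):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cats.png"), label="cat")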
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('conv1', model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('block'), f"""Unexpected layer name {k}"""
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"""res{block_index}""", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic that returns a function creating the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split('-')
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic that returns the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys) -> dict:
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"""Copied key={from_key} to={to_key}""")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=True, )
        size = 224 if 'seer' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=True, )
        print(f"""Pushed {name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name],
            names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name],
                config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
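# Hedged usage sketch (the script filename is hypothetical):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-dump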
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
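
# Hedged usage sketch:
#   is_arithmetic_series([2, 4, 6])  ->  True
#   arithmetic_mean([2, 4, 6])       ->  4.0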
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = 'ml.p3.2xlarge'
    iam_role_name = 'accelerate_sagemaker_execution_role'
    profile = 'hf-sm'
    region = 'us-east-1'
    num_machines = 1
    base_job_name = 'accelerate-sagemaker-1'
    pytorch_version = '1.6'
    transformers_version = '4.4'
    training_script = 'train.py'
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class PassArgsTester(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['model_name_or_path'], str)
        assert isinstance(converted_args['do_train'], bool)
        assert isinstance(converted_args['epochs'], int)
        assert isinstance(converted_args['learning_rate'], float)
        assert isinstance(converted_args['max_steps'], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
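
# Hedged sketch of the conversion the asserts above imply: _convert_nargs_to_dict turns an
# argv-style list into a typed dict (hypothetical call, types inferred from the test):
#   _convert_nargs_to_dict(["--do_train", "False", "--epochs", "3"])  ->  {"do_train": False, "epochs": 3}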
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'''Converting {name}...''')
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'''Pushed {checkpoint_name}''')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
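# Hedged usage sketch (the script filename is hypothetical):
#   python convert_levit_to_pytorch.py --model_name levit-128 --pytorch_dump_folder_path ./levit-dump --push_to_hub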
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import functools
def minimum_tickets_cost(days: list, costs: list) -> int:
    """Minimum cost to travel on all given days using 1-day, 7-day and 30-day passes."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)
@functools.cache
    def dynamic_programming(index: int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)
return dynamic_programming(1 )
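
# Hedged usage sketch (same problem as LeetCode 983 "Minimum Cost For Tickets"):
#   minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15])  ->  11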
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'image_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = [inputs['prompt']] * 2
        image = np.array(inputs['image']).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs['image'] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in rounded_slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type='pt'))[0]
        vae = components['vae']
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type='pt')
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, 'passing latents as image input generate different result from passing image')
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
        inputs = {
            'prompt': 'turn him into a cyborg',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'image_guidance_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['image'] = inputs['image'].resize((504, 504))

        model_id = 'timbrooks/instruct-pix2pix'
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
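
# Hedged usage sketch of the pipeline exercised above (checkpoint taken from the tests;
# other arguments are illustrative):
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
#   edited = pipe("turn him into a cyborg", image=init_image, num_inference_steps=10).images[0]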
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default='summarization', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'summary': Value('string')})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
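
# Hedged usage sketch: map dataset-specific column names onto the canonical schema.
#   Summarization(text_column="article", summary_column="highlights").column_mapping
#   ->  {"article": "text", "highlights": "summary"}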
'''simple docstring'''
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
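
# Hedged usage sketch: adjacency matrix of a 5-vertex graph, 3 colors allowed.
#   graph = [[0, 1, 0, 0, 0],
#            [1, 0, 1, 0, 1],
#            [0, 1, 0, 1, 0],
#            [0, 0, 1, 0, 0],
#            [0, 1, 0, 0, 0]]
#   color(graph, 3)  ->  [0, 1, 0, 1, 0]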
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, 'timm')
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
        if config.backbone not in timm.list_models():
            raise ValueError(f'''backbone {config.backbone} is not supported by timm.''')
        if hasattr(config, 'out_features') and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
        pretrained = getattr(config, 'use_pretrained_backbone', None)
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ['vision', 'timm'])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('config', TimmBackboneConfig())
        use_timm = kwargs.pop('use_timm_backbone', True)
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones')
        num_channels = kwargs.pop('num_channels', config.num_channels)
        features_only = kwargs.pop('features_only', config.features_only)
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
        out_indices = kwargs.pop('out_indices', config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment')

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
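
# Hedged usage sketch (backbone name and indices are illustrative):
#   config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=True, out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps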
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
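# Expected output: True (2**7 - 1 = 127 is prime) and False (2**11 - 1 = 2047 = 23 * 89).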
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, F"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
def a_ ( self : Any):
"""simple docstring"""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_)
with TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {})
__UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat")
self.assertTrue(os.path.isfile(UpperCamelCase_))
self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}})
__UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"])
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_))
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ModelForTest()
__UpperCAmelCase : Optional[int] = model.state_dict()
__UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k}
__UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
__UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k}
__UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_)
# Duplicates are removed
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_)
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key]))
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2}
__UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"])
self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2})
__UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
__UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"])
self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
| 77
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
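# Illustrative note (added): the _LazyModule above defers the heavy torch
# import until an attribute is first accessed, e.g. (hypothetical import path):
#   import transformers.models.mmbt as mmbt
#   config = mmbt.MMBTConfig()  # first attribute access triggers the real import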
| 622
|
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 622
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
_a : List[str] = FunnelTokenizer
_a : Any = FunnelTokenizerFast
_a : Optional[int] = True
_a : Optional[Any] = True
def _a ( self ):
"""simple docstring"""
super().setUp()
a_ = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _a ( self , **UpperCamelCase__ ):
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _a ( self , **UpperCamelCase__ ):
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _a ( self , UpperCamelCase__ ):
"""simple docstring"""
a_ = 'UNwant\u00E9d,running'
a_ = 'unwanted, running'
return input_text, output_text
def _a ( self ):
"""simple docstring"""
a_ = self.tokenizer_class(self.vocab_file )
a_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(UpperCamelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def _a ( self ):
"""simple docstring"""
a_ = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
a_ = tokenizer('UNwant\u00E9d,running' )
a_ = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
a_ = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 536
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"
class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
    from doctest import testmod
    testmod()
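    # Added usage sketch (illustrative):
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2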
| 536
| 1
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=__lowercase , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=__lowercase , default=5 )
parser.add_argument('--batch_size' , type=__lowercase , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=__lowercase , default=1 )
parser.add_argument('--freeze' , type=__lowercase , default=__lowercase )
parser.add_argument('--learning_rate' , type=__lowercase , default=5e-4 )
parser.add_argument('--seed' , type=__lowercase , default=0 )
parser.add_argument('--lr_scheduler_type' , type=__lowercase , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=__lowercase , default=10 )
parser.add_argument('--weight_decay' , type=__lowercase , default=0.0_1 )
parser.add_argument('--output_dir' , type=__lowercase , default='./results' )
return parser.parse_args()
a__ : Dict =load('''accuracy''')
def lowercase__ ( __lowercase : Tuple ) -> str:
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase = eval_pred
__UpperCamelCase = np.argmax(__lowercase , axis=1 )
return metric.compute(predictions=__lowercase , references=__lowercase )
class CustomCallback ( TrainerCallback ):
"""simple docstring"""
def __init__( self : str , __A : Optional[Any] ):
super().__init__()
__UpperCamelCase = trainer
def _lowerCamelCase ( self : Dict , __A : Any , __A : Union[str, Any] , __A : Tuple , **__A : Tuple ):
if control.should_evaluate:
__UpperCamelCase = deepcopy(__A )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = get_args()
set_seed(args.seed )
__UpperCamelCase = load_dataset('codeparrot/codecomplex' , split='train' )
__UpperCamelCase = dataset.train_test_split(test_size=0.2 )
__UpperCamelCase = train_test['test'].train_test_split(test_size=0.5 )
__UpperCamelCase = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
__UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
__UpperCamelCase = tokenizer.eos_token
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__UpperCamelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__UpperCamelCase = False
__UpperCamelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(__lowercase : Union[str, Any] ):
__UpperCamelCase = tokenizer(example['src'] , truncation=__lowercase , max_length=1024 )
        __UpperCamelCase = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__UpperCamelCase = train_test_validation.map(
__lowercase , batched=__lowercase , remove_columns=train_test_validation['train'].column_names , )
__UpperCamelCase = DataCollatorWithPadding(tokenizer=__lowercase )
__UpperCamelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
__UpperCamelCase = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=__lowercase , data_collator=__lowercase , compute_metrics=__lowercase , )
print('Training...' )
trainer.add_callback(CustomCallback(__lowercase ) )
trainer.train()
if __name__ == "__main__":
main()
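    # Example invocation (illustrative; the script filename is hypothetical):
    #   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
    #       --num_epochs 5 --batch_size 6 --output_dir ./results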
| 434
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is essentially what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
a__ : Optional[Any] =argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
a__ : List[Any] =parser.parse_args()
a__ : str =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
a__ : Any =args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
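    # Added worked example (illustrative): 8051 = 83 * 97, so pollard_rho(8051)
    # returns one of the two nontrivial factors, or None if every attempt
    # happens to fail with the chosen parameters.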
| 434
| 1
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
A__ = prime_factors(UpperCAmelCase_ )
if is_square_free(UpperCAmelCase_ ):
return -1 if len(UpperCAmelCase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
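    # Added sanity checks (illustrative): 6 = 2 * 3 and 30 = 2 * 3 * 5 are
    # square-free with an even resp. odd number of prime factors, while
    # 12 = 2 * 2 * 3 is not square-free.
    print(mobius(6), mobius(30), mobius(12))  # 1 -1 0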
| 104
|
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
    from doctest import testmod
    testmod()
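    # Added usage sketch (illustrative):
    print(join("-", ["a", "b", "c"]))  # a-b-c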
| 592
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class snake_case_ ( FlaxModelTesterMixin ,unittest.TestCase ):
A_ = FlaxAutoencoderKL
@property
def UpperCAmelCase__ ( self : Any )->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = 4
__lowerCAmelCase : Any = 3
__lowerCAmelCase : List[Any] = (32, 32)
__lowerCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
__lowerCAmelCase : List[str] = jax.random.uniform(_lowerCamelCase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def UpperCAmelCase__ ( self : List[str] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Tuple = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
__lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
| 701
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case_ ( TokenizerTesterMixin ,unittest.TestCase ):
A_ = RoCBertTokenizer
A_ = None
A_ = False
A_ = True
A_ = filter_non_english
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
super().setUp()
__lowerCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__lowerCAmelCase : List[Any] = {}
__lowerCAmelCase : List[Any] = {}
for i, value in enumerate(_snake_case ):
__lowerCAmelCase : List[Any] = i
__lowerCAmelCase : List[str] = i
__lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(_snake_case , _snake_case , ensure_ascii=_snake_case )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(_snake_case , _snake_case , ensure_ascii=_snake_case )
def UpperCAmelCase__ ( self : Tuple )->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(_snake_case , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_snake_case ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
__lowerCAmelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : List[str] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : int = RoCBertBasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Tuple )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
__lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=_snake_case , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : Union[str, Any] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowerCAmelCase : str = {}
for i, token in enumerate(_snake_case ):
__lowerCAmelCase : str = i
__lowerCAmelCase : Optional[Any] = RoCBertWordpieceTokenizer(vocab=_snake_case , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : List[str] )->Tuple:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : Optional[Any] )->Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Tuple )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__lowerCAmelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCAmelCase__ ( self : Optional[Any] )->Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCAmelCase : Any = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__lowerCAmelCase : str = tokenizer_r.encode_plus(
_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case , )
__lowerCAmelCase : str = tokenizer_r.do_lower_case if hasattr(_snake_case , """do_lower_case""" ) else False
__lowerCAmelCase : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Dict )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = ["""的""", """人""", """有"""]
__lowerCAmelCase : List[Any] = """""".join(_snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCAmelCase : Dict = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : Tuple = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(_snake_case )
__lowerCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowerCAmelCase : Tuple = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : Dict = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(_snake_case )
__lowerCAmelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_snake_case )
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
__lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase : int = tokenizer.encode("""你好""" , add_special_tokens=_snake_case )
__lowerCAmelCase : Tuple = tokenizer.encode("""你是谁""" , add_special_tokens=_snake_case )
__lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(_snake_case )
__lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : Optional[Any] )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__lowerCAmelCase : Dict = """你好,你是谁"""
__lowerCAmelCase : str = tokenizer.tokenize(_snake_case )
__lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(_snake_case )
__lowerCAmelCase : int = tokenizer.convert_tokens_to_shape_ids(_snake_case )
__lowerCAmelCase : Dict = tokenizer.convert_tokens_to_pronunciation_ids(_snake_case )
__lowerCAmelCase : Optional[int] = tokenizer.prepare_for_model(
_snake_case , _snake_case , _snake_case , add_special_tokens=_snake_case )
__lowerCAmelCase : Optional[Any] = tokenizer.encode_plus(_snake_case , add_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 240
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowercase__ ( ):
__UpperCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
__UpperCAmelCase = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
return image
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any] ):
__UpperCAmelCase = dct.pop(_A )
__UpperCAmelCase = val
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :str ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(_A , requires_grad=_A ), v_bias) )
__UpperCAmelCase = qkv_bias
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Tuple ):
__UpperCAmelCase = 364 if "coco" in model_name else 224
    __UpperCAmelCase = Blip2VisionConfig(image_size=_A ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__UpperCAmelCase = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_A ).to_dict()
elif "opt-6.7b" in model_name:
__UpperCAmelCase = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_A ).to_dict()
elif "t5-xl" in model_name:
        __UpperCAmelCase = T5Config.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
        __UpperCAmelCase = T5Config.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    __UpperCAmelCase = Blip2Config(vision_config=_A , text_config=_A )
return config, image_size
@torch.no_grad()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any=None , snake_case_ :Union[str, Any]=False ):
__UpperCAmelCase = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if "opt" in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__UpperCAmelCase = tokenizer('''\n''' , add_special_tokens=_A ).input_ids[0]
    __UpperCAmelCase = get_blip2_config(_A , eos_token_id=_A )
    __UpperCAmelCase = Blip2ForConditionalGeneration(_A ).eval()
__UpperCAmelCase = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
__UpperCAmelCase = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase = load_model_and_preprocess(
name=_A , model_type=_A , is_eval=_A , device=_A )
original_model.eval()
print('''Done!''' )
# update state dict keys
__UpperCAmelCase = original_model.state_dict()
__UpperCAmelCase = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(_A )
if key.startswith('''Qformer.bert''' ):
__UpperCAmelCase = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__UpperCAmelCase = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__UpperCAmelCase = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__UpperCAmelCase = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__UpperCAmelCase = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__UpperCAmelCase = key.replace('''t5''' , '''language''' )
__UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(_A , _A )
__UpperCAmelCase = hf_model.load_state_dict(_A , strict=_A )
assert len(_A ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCAmelCase = load_demo_image()
__UpperCAmelCase = vis_processors["eval"](_A ).unsqueeze(0 ).to(_A )
__UpperCAmelCase = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_A )
# create processor
__UpperCAmelCase = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_A , image_std=_A )
    __UpperCAmelCase = Blip2Processor(image_processor=_A , tokenizer=_A )
__UpperCAmelCase = processor(images=_A , return_tensors='''pt''' ).pixel_values.to(_A )
# make sure processor creates exact same pixel values
assert torch.allclose(_A , _A )
original_model.to(_A )
hf_model.to(_A )
with torch.no_grad():
if "opt" in model_name:
__UpperCAmelCase = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__UpperCAmelCase = hf_model(_A , _A ).logits
else:
__UpperCAmelCase = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__UpperCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__UpperCAmelCase = hf_model(_A , _A , labels=_A ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCAmelCase = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_A )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCAmelCase = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_A )
else:
# cast to same type
__UpperCAmelCase = logits.dtype
assert torch.allclose(original_logits.to(_A ) , _A , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__UpperCAmelCase = ""
__UpperCAmelCase = tokenizer(_A , return_tensors='''pt''' ).input_ids.to(_A )
__UpperCAmelCase = original_model.generate({'''image''': original_pixel_values} )
__UpperCAmelCase = hf_model.generate(
_A , _A , do_sample=_A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _A )
__UpperCAmelCase = input_ids.shape[1]
__UpperCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_A )
__UpperCAmelCase = [text.strip() for text in output_text]
print('''HF generation:''' , _A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_A )
hf_model.save_pretrained(_A )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
_lowercase : List[str] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_lowercase : List[Any] = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
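    # Example invocation (illustrative; the script filename is hypothetical):
    #   python convert_blip2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub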
| 49
|
"""simple docstring"""
__UpperCAmelCase : List[str] = {str(digit): digit**5 for digit in range(10)}
def A ( _A ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_A ) )
def A ( ):
"""simple docstring"""
return sum(
number
for number in range(1_000, 1_000_000 )
if number == digits_fifth_powers_sum(_A ) )
if __name__ == "__main__":
print(solution())
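    # Added verification sketch (illustrative): 4150 is one qualifying number,
    # since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
    print(digits_fifth_powers_sum(4150) == 4150)  # True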
| 584
| 0
|
'''simple docstring'''
import argparse
import json
import subprocess
def lowercase__ ( __lowercase : int , __lowercase : Any ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
__UpperCamelCase = subprocess.run(__lowercase , shell=__lowercase , stdout=subprocess.PIPE )
__UpperCamelCase = output.stdout.decode('utf-8' )
__UpperCamelCase = json.loads(__lowercase )
__UpperCamelCase = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__lowercase )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(__lowercase ) )
if len(__lowercase ) > 0:
__UpperCamelCase = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def lowercase__ ( __lowercase : List[str] ) -> str:
"""simple docstring"""
return values.split(',' )
a__ : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
a__ : str =parser.parse_args()
get_runner_status(args.target_runners, args.token)
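    # Example invocation (illustrative; runner names and the script filename
    # are hypothetical):
    #   python get_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_PAT>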
| 434
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a__ : Optional[Any] =logging.get_logger(__name__)
def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : str , __lowercase : str=None , __lowercase : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
if "." in tensor_name:
__UpperCamelCase = tensor_name.split('.' )
for split in splits[:-1]:
__UpperCamelCase = getattr(__lowercase , __lowercase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
__UpperCamelCase = new_module
__UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
__UpperCamelCase = tensor_name in module._buffers
__UpperCamelCase = getattr(__lowercase , __lowercase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
__UpperCamelCase = False
__UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
__UpperCamelCase = False
__UpperCamelCase = False
else:
        __UpperCamelCase = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        __UpperCamelCase = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
__UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__UpperCamelCase = old_value.to(__lowercase )
elif isinstance(__lowercase , torch.Tensor ):
__UpperCamelCase = value.to('cpu' )
                if value.dtype == torch.int8:
__UpperCamelCase = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
                    if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
__UpperCamelCase = torch.tensor(__lowercase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
__UpperCamelCase = new_value.T
__UpperCamelCase = old_value.__dict__
            if is_8bit:
                __UpperCamelCase = bnb.nn.Int8Params(__lowercase , requires_grad=__lowercase , **__lowercase ).to(__lowercase )
            elif is_4bit:
                __UpperCamelCase = bnb.nn.Params4bit(__lowercase , requires_grad=__lowercase , **__lowercase ).to(__lowercase )
__UpperCamelCase = new_value
            if fp16_statistics is not None:
                setattr(module.weight , 'SCB' , fp16_statistics.to(__lowercase ) )
else:
if value is None:
__UpperCamelCase = old_value.to(__lowercase )
elif isinstance(__lowercase , torch.Tensor ):
__UpperCamelCase = value.to(__lowercase )
else:
__UpperCamelCase = torch.tensor(__lowercase , device=__lowercase )
if is_buffer:
__UpperCamelCase = new_value
else:
__UpperCamelCase = nn.Parameter(__lowercase , requires_grad=old_value.requires_grad )
__UpperCamelCase = new_value
def lowercase__ ( __lowercase : List[Any] , __lowercase : Dict=None , __lowercase : List[Any]=None , __lowercase : str=None , __lowercase : int=False ) -> Optional[int]:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
__UpperCamelCase = []
current_key_name.append(__lowercase )
if (isinstance(__lowercase , nn.Linear ) or isinstance(__lowercase , __lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(__lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowercase , __lowercase ):
__UpperCamelCase , __UpperCamelCase = module.weight.shape
else:
__UpperCamelCase = module.in_features
__UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                    __UpperCamelCase = bnb.nn.Linear8bitLt(
                        __lowercase , __lowercase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
__UpperCamelCase = True
else:
if (
                        quantization_config.llm_int8_skip_modules is not None
                        and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            __UpperCamelCase = bnb.nn.Linear4bit(
                                __lowercase , __lowercase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
__UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
__UpperCamelCase = type(__lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowercase )
if len(list(module.children() ) ) > 0:
__UpperCamelCase , __UpperCamelCase = _replace_with_bnb_linear(
__lowercase , __lowercase , __lowercase , __lowercase , has_been_replaced=__lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple=None , __lowercase : List[Any]=None , __lowercase : Union[str, Any]=None ) -> Dict:
"""simple docstring"""
__UpperCamelCase = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
__UpperCamelCase , __UpperCamelCase = _replace_with_bnb_linear(
__lowercase , __lowercase , __lowercase , __lowercase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def lowercase__ ( *__lowercase : Tuple , **__lowercase : Any ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , __lowercase , )
return replace_with_bnb_linear(*__lowercase , **__lowercase )
def lowercase__ ( *__lowercase : Tuple , **__lowercase : int ) -> Any:
"""simple docstring"""
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , __lowercase , )
return set_module_quantized_tensor_to_device(*__lowercase , **__lowercase )
def lowercase__ ( __lowercase : Optional[int] ) -> int:
"""simple docstring"""
__UpperCamelCase = deepcopy(__lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
__UpperCamelCase = find_tied_parameters(__lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowercase , __lowercase ):
__UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__UpperCamelCase = sum(__lowercase , [] )
__UpperCamelCase = len(__lowercase ) > 0
# Check if it is a base model
__UpperCamelCase = not hasattr(__lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__UpperCamelCase = list(model.named_children() )
__UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
__UpperCamelCase = set(__lowercase ) - set(__lowercase )
__UpperCamelCase = list(set(__lowercase ) ) + list(__lowercase )
# remove ".weight" from the keys
__UpperCamelCase = ['.weight', '.bias']
__UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__UpperCamelCase = name.replace(__lowercase , '' )
filtered_module_names.append(__lowercase )
return filtered_module_names
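# Added usage sketch (illustrative): `_ToyInt8Config` below is a hypothetical
# stand-in exposing only the attributes read above; a real call would pass a
# transformers BitsAndBytesConfig, and the swap needs a working bitsandbytes
# install with CUDA support.
if __name__ == "__main__":
    class _ToyInt8Config:
        llm_int8_threshold = 6.0
        llm_int8_has_fp16_weight = False
        llm_int8_skip_modules = None

        def quantization_method(self):
            return "llm_int8"

    toy = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
    toy = replace_with_bnb_linear(toy, quantization_config=_ToyInt8Config())
    print(toy)  # the two Linear layers should now be bnb.nn.Linear8bitLt modules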
| 434
| 1
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : Any = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowercase__ : str = MaskFormerConfig(backbone_config=__lowerCAmelCase )
lowercase__ : Tuple = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowercase__ : Any = 8_47
lowercase__ : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowercase__ : Tuple = 1_50
lowercase__ : Optional[Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowercase__ : Optional[int] = 1_71
lowercase__ : Optional[int] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowercase__ : Optional[int] = 1_33
lowercase__ : Optional[int] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowercase__ : Any = 19
lowercase__ : Union[str, Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowercase__ : Optional[int] = 65
lowercase__ : Optional[Any] = '''mapillary-vistas-id2label.json'''
lowercase__ : List[str] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Optional[Any] = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
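# e.g. (illustrative): rename_key(sd, "backbone.patch_embed.norm.bias", "model...norm.bias")
# pops the old key from the dict and re-inserts its value under the new key; all other entries stay put.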
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
# fmt: on
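# Worked example (illustrative): for a stage with dim = 96, the fused qkv weight has shape
# (3 * 96, 96) = (288, 96); rows [0:96] become the query projection, rows [96:192] the key,
# and rows [192:288] the value, matching the three slices above.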
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
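# Illustrative invocation (a sketch; the checkpoint path below is an assumption, point it at
# your own pickled MaskFormer weights):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade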
'''simple docstring'''
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary search for the smallest index in v[l+1 .. r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    # tail[i] holds the smallest possible tail value of an increasing subsequence of length i + 1
    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence of length 1
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the tail of some shorter candidate subsequence
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
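    # illustrative sanity check: the longest increasing subsequence of the list below is
    # [2, 3, 7, 8, 10, 13], so the expected length is 6
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6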
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
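# Minimal usage sketch (illustrative; assumes a torch-enabled environment):
#
#   from transformers.models.cpmant import CpmAntConfig
#   config = CpmAntConfig()  # the heavy modeling module is only imported on first attribute access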
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
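# Illustrative (a sketch): outside the test-suite the same helper shrinks a teacher directly, e.g.
#   student, *_ = create_student_by_copying_alternating_layers(TINY_BART, "student_dir", e=1, d=1)
#   student.save_pretrained("student_dir")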
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
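# Worked example (illustrative): for pred_label = [1, 1, 2] and label = [1, 2, 2] with
# num_labels = 3 and no ignore_index hits, class 1 gets area_intersect = 1, area_pred_label = 2,
# area_label = 1, so area_union = 2 + 1 - 1 = 2 and IoU(class 1) = 1/2.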
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
# fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass
    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
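# Minimal usage sketch (illustrative; assumes the character-level vocab written in setUp):
#
#   tokenizer = MgpstrTokenizer.from_pretrained(tmpdirname)
#   tokenizer.tokenize("tester")  # -> ["t", "e", "s", "t", "e", "r"]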
'''simple docstring'''
def selection_sort(collection):
    """Sort a mutable sequence in place by repeatedly selecting the smallest remaining item."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
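# Example (illustrative): selection_sort([3, 1, 2]) returns [1, 2, 3]; the sort happens in place,
# so the list passed in is reordered as well.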
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load entries of a pre-existing '<token> <count>' dictionary into self.encoder."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
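# Minimal usage sketch (illustrative; PhoBERT expects word-segmented Vietnamese input):
#
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   tokenizer.tokenize("Tôi là sinh_viên")  # -> BPE pieces, with "@@" marking word continuations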
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
                 decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
                 scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
                 **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
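# Minimal usage sketch (illustrative):
#
#   config = Speech2Text2Config(vocab_size=10000, d_model=256)
#   config.hidden_size  # -> 256, resolved through attribute_map above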
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999",
            RANK="0", LOCAL_RANK="0", WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        # env var names below follow accelerate's FSDP plugin conventions
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
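# Illustrative note (a sketch of how the plugin is driven): every test above configures FSDP
# purely through environment variables, e.g. under
#   ACCELERATE_USE_FSDP=true FSDP_SHARDING_STRATEGY=1
# a bare FullyShardedDataParallelPlugin() resolves to ShardingStrategy.FULL_SHARD.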
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
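# Design note (hedged): nothing above imports torch/TF/Flax at module load
# time; `_LazyModule` resolves a name such as `OPTModel` to its backend module
# only on first attribute access, e.g.:
#
#     from transformers.models.opt import OPTConfig  # cheap, config only
#     from transformers.models.opt import OPTModel   # the torch import happens here, lazily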
| 565
| 1
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Wraps a TVLT image processor and a TVLT feature extractor into a single processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
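# Hedged usage sketch (checkpoint name assumed; the TVLT base weights are
# published as `ZinengTang/tvlt-base` on the Hub):
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)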
| 702
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
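# Hedged run sketch (the script name is a placeholder; assumes `accelerate
# config` has been run once and a tracker such as tensorboard is installed):
#
#     accelerate launch tracking_example.py --with_tracking --project_dir logs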
| 584
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
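# Hedged usage sketch (paths are placeholders for a Mesh-TensorFlow GPTSAN
# checkpoint directory and the desired PyTorch output file):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./tf_ckpt --output ./gptsan.pt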
| 653
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 468
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
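# `text_path` above comes from the shared conftest; a hypothetical stand-in
# fixture (content chosen only to satisfy the 4-row checks above) would be:
#
#     @pytest.fixture
#     def text_path(tmp_path_factory):
#         path = tmp_path_factory.mktemp("data") / "file.txt"
#         path.write_text("foo\nbar\nfoobar\nfoo bar\n")
#         return str(path)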
| 717
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 203
| 0
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator, in lowest terms, of the product of the digit-cancelling fractions."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
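# Worked example: 49/98 is "digit-cancelling" because naively striking the 9s
# gives 4/8, and 4/8 == 49/98 == 1/2. The four non-trivial two-digit cases are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution()
# returns 100 (Project Euler problem 33).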
| 598
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
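# Hedged usage sketch (the model path is a placeholder for any diffusers
# checkpoint directory containing a `vae/` subfolder):
#
#     python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./onnx --opset 14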
| 268
| 0
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from an image tensor, mirroring tf.image.extract_patches."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render text on a solid background and return it as a PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header, **kwargs):
    """Render a header text above an image and return the combined numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class __a ( __UpperCAmelCase ):
UpperCamelCase_ : Tuple = ['''flattened_patches''']
def __init__( self : Optional[int] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : int = 2_048 , UpperCAmelCase_ : bool = False , **UpperCAmelCase_ : Optional[Any] , )-> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
UpperCamelCase = patch_size if patch_size is not None else {"height": 16, "width": 16}
UpperCamelCase = do_normalize
UpperCamelCase = do_convert_rgb
UpperCamelCase = max_patches
UpperCamelCase = is_vqa
def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : dict , **UpperCAmelCase_ : int )-> int:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase = to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.FIRST )
UpperCamelCase = torch.from_numpy(lowerCAmelCase_ )
UpperCamelCase , UpperCamelCase = patch_size["height"], patch_size["width"]
UpperCamelCase , UpperCamelCase = get_image_size(lowerCAmelCase_ )
# maximize scale s.t.
UpperCamelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase = max(min(math.floor(scale * image_height / patch_height ) , lowerCAmelCase_ ) , 1 )
UpperCamelCase = max(min(math.floor(scale * image_width / patch_width ) , lowerCAmelCase_ ) , 1 )
UpperCamelCase = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=lowerCAmelCase_ , antialias=lowerCAmelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase = torch_extract_patches(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCamelCase = patches.shape
UpperCamelCase = patches_shape[1]
UpperCamelCase = patches_shape[2]
UpperCamelCase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase = torch.arange(lowerCAmelCase_ ).reshape([rows, 1] ).repeat(1 , lowerCAmelCase_ ).reshape([rows * columns, 1] )
UpperCamelCase = torch.arange(lowerCAmelCase_ ).reshape([1, columns] ).repeat(lowerCAmelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase = row_ids.to(torch.floataa )
UpperCamelCase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase = torch.nn.functional.pad(lowerCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase = to_numpy_array(lowerCAmelCase_ )
return result
def _SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple )-> Tuple:
"""simple docstring"""
if image.dtype == np.uinta:
UpperCamelCase = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase = np.mean(lowerCAmelCase_ )
UpperCamelCase = np.std(lowerCAmelCase_ )
UpperCamelCase = max(lowerCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , **lowerCAmelCase_ )
def preprocess ( self , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , )-> ImageInput:
"""simple docstring"""
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
patch_size = patch_size if patch_size is not None else self.patch_size
max_patches = max_patches if max_patches is not None else self.max_patches
is_vqa = self.is_vqa
if kwargs.get("data_format" , None ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are always flattened patches." )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
font_bytes = kwargs.pop("font_bytes" , None )
font_path = kwargs.pop("font_path" , None )
if isinstance(header_text , str ):
header_text = [header_text] * len(images )
images = [
render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
for i, image in enumerate(images )
]
if do_normalize:
images = [self.normalize(image=image ) for image in images]
# convert to torch tensor and permute
images = [
self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
for image in images
]
# create attention mask in numpy
attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
encoded_outputs = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=return_tensors )
return encoded_outputs
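# --- Illustrative sketch: how the attention mask above falls out of the
# padding. Rows appended by torch.nn.functional.pad are all zeros, while every
# real patch row carries 1-based row/col ids, so a nonzero row-sum marks a
# real patch (exact cancellation of a real row is treated as negligible).
# demo_attention_mask is a hypothetical name.
import numpy as np

def demo_attention_mask(flattened_patches):
    # [max_patches, 2 + patch_dim] -> [max_patches]
    return (flattened_patches.sum(axis=-1) != 0).astype(np.float32)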
| 712
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __a ( SchedulerCommonTest ):
scheduler_classes = (EulerDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config ( self , **kwargs )-> Union[str, Any]:
"""simple docstring"""
config = {
"num_train_timesteps": 1_100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs )
return config
def test_timesteps ( self )-> Any:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas ( self )-> Any:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules ( self )-> Dict:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule )
def test_prediction_type ( self )-> int:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_full_loop_no_noise ( self )-> Any:
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample , generator=generator )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
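# --- Illustrative sketch of the denoising loop the test above exercises,
# reduced to its skeleton. `model` stands for any noise-predicting module
# (a hypothetical stand-in for self.dummy_model()); the three scheduler calls
# are the same ones used in the test.
import torch
from diffusers import EulerDiscreteScheduler

def demo_euler_sample(model, shape=(1, 3, 8, 8), num_inference_steps=10, seed=0):
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    generator = torch.manual_seed(seed)
    sample = torch.randn(shape) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample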
def test_full_loop_with_v_prediction ( self )-> str:
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps )
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for i, t in enumerate(scheduler.timesteps ):
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample , generator=generator )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
def test_full_loop_device ( self )-> Tuple:
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample , generator=generator )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def test_full_loop_device_karras_sigmas ( self )-> Any:
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample , t )
model_output = model(sample , t )
output = scheduler.step(model_output , t , sample , generator=generator )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
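# --- Illustrative sketch (hypothetical helper): use_karras_sigmas=True in the
# test above swaps the default sigma ramp for the Karras et al. (2022)
# schedule, which interpolates linearly in sigma**(1/rho) space with rho = 7.
import numpy as np

def demo_karras_sigmas(sigma_min, sigma_max, num_steps, rho=7.0):
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho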
| 556
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase__ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class a ( unittest.TestCase ):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline ( self , model , tokenizer , processor ):
classifier = ZeroShotClassificationPipeline(
model=model , tokenizer=tokenizer , candidate_labels=["""politics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def run_pipeline_test ( self , classifier , _ ):
outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
# No kwarg
outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
outputs = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
# https://github.com/huggingface/transformers/issues/13846
outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
outputs , [
{"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
for i in range(1 )
] , )
outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
outputs , [
{"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
for i in range(2 )
] , )
with self.assertRaises(ValueError ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(TypeError ):
classifier(None , candidate_labels="""politics""" )
with self.assertRaises(ValueError ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(TypeError ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=None )
with self.assertRaises(ValueError ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(AttributeError ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )
self.run_entailment_id(classifier )
def run_entailment_id ( self , zero_shot_classifier : Pipeline ):
config = zero_shot_classifier.model.config
original_label2id = config.label2id
original_entailment = zero_shot_classifier.entailment_id
# Each override below must land on config.label2id so that
# zero_shot_classifier.entailment_id re-resolves against the new labels.
config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
config.label2id = original_label2id
self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
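# --- Illustrative sketch of the lookup the assertions above pin down: the
# pipeline scans label2id for the first label whose lowercase form starts
# with "entail" and falls back to -1 when none matches. demo_entailment_id
# is a hypothetical name.
def demo_entailment_id(label2id):
    for label, ind in label2id.items():
        if label.lower().startswith("entail"):
            return ind
    return -1

# demo_entailment_id({"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}) == -1
# demo_entailment_id({"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}) == 2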
@require_torch
def test_truncation ( self ):
zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def test_small_model_pt ( self ):
zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def test_small_model_tf ( self ):
zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def test_large_model_pt ( self ):
zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def test_large_model_tf ( self ):
zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
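# --- Illustrative sketch (hypothetical helper) of how the scores asserted
# above are derived from the NLI model's logits. With multi_label=True each
# candidate label is softmaxed independently against its contradiction logit
# (so scores need not sum to 1); otherwise the entailment logits of all
# candidates are softmaxed jointly (so they do sum to 1, as the
# assertAlmostEqual checks earlier in this file verify).
import numpy as np

def demo_zero_shot_scores(entail_logits, contra_logits, multi_label):
    entail_logits = np.asarray(entail_logits, dtype=np.float64)
    contra_logits = np.asarray(contra_logits, dtype=np.float64)
    if multi_label:
        pair = np.stack([contra_logits, entail_logits], axis=-1)
        pair = np.exp(pair - pair.max(axis=-1, keepdims=True))
        return (pair / pair.sum(axis=-1, keepdims=True))[..., 1]
    exp = np.exp(entail_logits - entail_logits.max())
    return exp / exp.sum()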
| 277
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
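# --- Illustrative usage sketch: once uploaded, the tiny checkpoint is meant
# for fast machinery tests rather than translation quality, e.g.
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   model.generate(**tok(["Making tiny model"], return_tensors="pt"))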
| 466
| 0
|
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs (model ):
original_config = model.config
encoder_config = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
decoder_config = MBartConfig(
is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
return encoder_config, decoder_config
def rename_key (name ):
if "encoder.model" in name:
name = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
name = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
name = '''encoder.''' + name
if "attn.proj" in name:
name = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
name = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
name = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
name = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
name = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
name = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
name = '''encoder.layernorm.bias'''
return name
def convert_state_dict (orig_state_dict , model ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split('''.''' )
layer_num = int(key_split[3] )
block_num = int(key_split[5] )
dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
else:
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[:dim]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
orig_state_dict[f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
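# --- Illustrative sketch (hypothetical shapes): the "qkv" branch above slices
# a fused [3 * dim, dim] projection weight into separate query/key/value
# tensors, the same split performed on `val` in the loop.
import torch

def demo_split_qkv(qkv_weight):
    dim = qkv_weight.shape[0] // 3
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]

# q, k, v = demo_split_qkv(torch.randn(3 * 96, 96)); q.shape == (96, 96)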
def convert_donut_checkpoint (model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
# load original model
original_model = DonutModel.from_pretrained(model_name ).eval()
# load HuggingFace model
encoder_config, decoder_config = get_configs(original_model )
encoder = DonutSwinModel(encoder_config )
decoder = MBartForCausalLM(decoder_config )
model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
model.eval()
state_dict = original_model.state_dict()
new_state_dict = convert_state_dict(state_dict , model )
model.load_state_dict(new_state_dict )
# verify results on scanned document
dataset = load_dataset('''hf-internal-testing/example-documents''' )
image = dataset['''test'''][0]['''image'''].convert('''RGB''' )
tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name , from_slow=True )
image_processor = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
processor = DonutProcessor(image_processor , tokenizer )
pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
question = '''When is the coffee break?'''
task_prompt = task_prompt.replace('''{user_input}''' , question )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
task_prompt = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
task_prompt = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
task_prompt = '''<s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
task_prompt = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
task_prompt = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
decoder_input_ids = original_model.decoder.tokenizer(task_prompt , add_special_tokens=False , return_tensors='''pt''' )[
'''input_ids'''
]
original_patch_embed = original_model.encoder.model.patch_embed(pixel_values )
patch_embeddings , _ = model.encoder.embeddings(pixel_values )
assert torch.allclose(original_patch_embed , patch_embeddings , atol=1E-3 )
# verify encoder hidden states
original_last_hidden_state = original_model.encoder(pixel_values )
last_hidden_state = model.encoder(pixel_values ).last_hidden_state
assert torch.allclose(original_last_hidden_state , last_hidden_state , atol=1E-2 )
# verify decoder hidden states
original_logits = original_model(pixel_values , decoder_input_ids , None ).logits
logits = model(pixel_values , decoder_input_ids=decoder_input_ids ).logits
assert torch.allclose(original_logits , logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
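# --- Illustrative invocation (the script filename is assumed; the model name
# matches the argparse default above):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa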
| 355
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpt2,
recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs (context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
train_data , objective_set = generate_datasets(
context_len , data_file , number=size_objective_set , min_len=10_26 , trim=trim )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
model = load_gpt2('''gpt2''' ).to(device )
print('''computing perplexity on objective set''' )
orig_perp = compute_perplexity(model , objective_set , context_len ).item()
print('''perplexity on objective set:''' , orig_perp )
# collect igf pairs and save to file demo.jbl
collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
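# --- Illustrative sketch (hypothetical; the real compute_perplexity is
# imported from igf.igf above): perplexity here is the exponential of the
# average causal-LM loss over a set of token batches.
import torch

def demo_perplexity(model, batches, device):
    model.eval()
    losses = []
    with torch.no_grad():
        for batch in batches:  # each batch: LongTensor of token ids, shape [1, seq_len]
            batch = batch.to(device)
            losses.append(model(batch, labels=batch).loss.item())
    return torch.exp(torch.tensor(losses).mean()).item()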
def training_secondary_learner (secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
secondary_learner = SecondaryLearner(model )
# Train secondary learner
secondary_learner = train_secondary_learner(
secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=1_00 , igf_model_path=igf_model_path , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune (model , train_dataset , test_dataset , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
train_sampler = RandomSampler(train_dataset )
train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
num_train_epochs = max_steps // (len(train_dataset )) + 1
global_step = 0
context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
model.train()
if secondary_learner is not None:
secondary_learner.to(device )
secondary_learner.eval()
contexts = []
examples = 0
observed_qs = []
test_perps = []
# Compute the performance of the transformer model at the beginning
real_perp = compute_perplexity(model , test_dataset , context_len )
test_perps.append(real_perp )
print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
for epoch in range(int(num_train_epochs ) ):
for step, example in enumerate(train_dataloader ):
torch.cuda.empty_cache()
start = random.randint(0 , example.size(2 ) - context_len - 1 )
context = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
outputs = model(context , labels=context )
do_backprop = True
if secondary_learner is not None:
predicted_q = secondary_learner.forward(
torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
threshold = -1
if predicted_q < threshold:
do_backprop = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
real_perp = compute_perplexity(model , test_dataset , context_len )
test_perps.append(real_perp )
print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main ():
parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=str , default=None , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=str , default=None , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=int , default=None , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=int , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=int , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=int , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=int , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=int , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=int , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=int , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=int , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=int , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=int , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=True , type=bool , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=float , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=str , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=recopy_gpt2 , type=str , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=True , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
secondary_learner_train_data = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
secondary_learner = training_secondary_learner(
secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
train_dataset , test_dataset = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=True )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
model , train_dataset , test_dataset , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 355
| 1
|