"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>

        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
"""
Sum of digits sequence
Problem 551

Let a(0), a(1), ... be an integer sequence defined by:
    a(0) = 1
    for n >= 1, a(n) = a(n-1) + digitsum(a(n-1))

For example, a(1) = 1 + 1 = 2 and a(4) = 8 + 8 = 16.

Find a(10^15).
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10^k, when the terms are written in the form
        a(i) = b * 10^k + c.
    Returns the total amount added to the term and the number of terms jumped.
    """
    # ds_b - digitsum(b); c - the value of the k lowest digits of a(i)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same idea as next_term(a_i, k, i, n), but computes terms one by one
    without caching (used by next_term for the smallest jumps).
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array given in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
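
# Sanity-check sketch (not in the original file): the memoized solution above
# should agree with this naive one-term-at-a-time computation for small n,
# e.g. naive_solution(10) == solution(10) == 62.
def naive_solution(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term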
| 41
| 0
|
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest and fastest version of image resizing.
    Source: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the source X coordinate for destination X."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source Y coordinate for destination Y."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
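
    # Quick self-check sketch (not in the original file): resize a random
    # 4x4 RGB array up to 8x8 without needing an image file on disk.
    rand_img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
    upscaled = NearestNeighbour(rand_img, 8, 8)
    upscaled.process()
    assert upscaled.output.shape == (8, 8, 3)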
| 47
|
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD) recursively."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative GCD; more memory efficient since it creates no extra stack frames."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
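
    # Spot checks (a sketch, not in the original file):
    assert greatest_common_divisor(24, 40) == 8
    assert gcd_by_iterative(24, 40) == 8
    assert greatest_common_divisor(0, 7) == 7  # gcd(0, n) == n by convention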
| 41
| 0
|
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # yield the interior sample points between a and b, spaced h apart
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
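
    # Convergence sketch (not in the original file): for f(x) = x**2 on [0, 1]
    # the exact integral is 1/3, so the error should shrink as steps grow.
    for steps in (10.0, 100.0, 1000.0):
        approx = method_1([0.0, 1.0], steps)
        print(f"steps={steps:>7}: |error| = {abs(approx - 1 / 3):.6f}")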
| 48
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[int] = min_resolution
lowerCamelCase__ : Optional[Any] = max_resolution
lowerCamelCase__ : Union[str, Any] = do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20}
lowerCamelCase__ : Dict = do_thumbnail
lowerCamelCase__ : Optional[int] = do_align_axis
lowerCamelCase__ : Any = do_pad
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase ( _lowercase , unittest.TestCase ):
a = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def lowerCamelCase_ ( self: List[str] ):
pass
@is_flaky()
def lowerCamelCase_ ( self: Union[str, Any] ):
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Optional[int] ):
# Initialize image_processing
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Dict ):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 41
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 49
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mra"""] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 41
| 0
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __snake_case ( a ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(_snake_case):
self.assertDictEqual(_snake_case , example_records[i])
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self._create_example_records()
UpperCAmelCase_ = Dataset.from_list(_snake_case)
UpperCAmelCase_ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def lowerCamelCase ( self : Optional[Any]): # checks what happens with missing columns
"""simple docstring"""
UpperCAmelCase_ = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def lowerCamelCase ( self : Any): # checks if the type can be inferred from the second record
"""simple docstring"""
UpperCAmelCase_ = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
UpperCAmelCase_ = Dataset.from_list(_snake_case)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = Dataset.from_list([])
self.assertEqual(len(_snake_case) , 0)
self.assertListEqual(dset.column_names , [])
| 51
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowerCamelCase__ : List[Any] = torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = pipe(
image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowerCamelCase__ : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ : Tuple = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 41
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__lowerCamelCase : Any = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
def run_func(_lowerCAmelCase ):
@wraps(_lowerCAmelCase )
def run_in_eager_mode(*_lowerCAmelCase , **_lowerCAmelCase ):
return func(*_lowerCAmelCase , **_lowerCAmelCase )
@wraps(_lowerCAmelCase )
@tf.function(experimental_compile=_lowerCAmelCase )
def run_in_graph_mode(*_lowerCAmelCase , **_lowerCAmelCase ):
return func(*_lowerCAmelCase , **_lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> ["tf.Tensor"]:
UpperCamelCase : Optional[Any] = random.Random()
UpperCamelCase : str = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class A__ ( __snake_case ):
_UpperCAmelCase :TensorFlowBenchmarkArguments
_UpperCAmelCase :PretrainedConfig
_UpperCAmelCase :str = "TensorFlow"
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return tf.__version__
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCamelCase : Optional[Any] = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_speed(_inference )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCamelCase : Any = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_speed(_train )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
UpperCamelCase : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCamelCase : Union[str, Any] = self._prepare_inference_func(A_ , A_ , A_ )
return self._measure_memory(_inference )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , A_ )
UpperCamelCase : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCamelCase : Union[str, Any] = self._prepare_train_func(A_ , A_ , A_ )
return self._measure_memory(_train )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCamelCase : Union[str, Any] = (
hasattr(A_ , "architectures" )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase : Optional[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase : str = __import__("transformers" , fromlist=[model_class] )
UpperCamelCase : Dict = getattr(A_ , A_ )
UpperCamelCase : List[str] = model_cls(A_ )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCamelCase : Tuple = TF_MODEL_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase : Optional[int] = config.vocab_size if hasattr(A_ , "vocab_size" ) else config.encoder.vocab_size
UpperCamelCase : str = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(A_ , decoder_input_ids=A_ , training=A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(A_ , training=A_ )
UpperCamelCase : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCamelCase : List[Any] = (
hasattr(A_ , "architectures" )
and isinstance(config.architectures , A_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase : Dict = __import__("transformers" , fromlist=[model_class] )
UpperCamelCase : Any = getattr(A_ , A_ )
UpperCamelCase : List[str] = model_cls(A_ )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCamelCase : int = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A_ )
# encoder-decoder has vocab size saved differently
UpperCamelCase : Optional[Any] = config.vocab_size if hasattr(A_ , "vocab_size" ) else config.encoder.vocab_size
UpperCamelCase : Any = random_input_ids(A_ , A_ , A_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCamelCase : List[Any] = model(A_ , decoder_input_ids=A_ , labels=A_ , training=A_ )[0]
UpperCamelCase : Dict = tf.gradients(A_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCamelCase : Union[str, Any] = model(A_ , labels=A_ , training=A_ )[0]
UpperCamelCase : Optional[int] = tf.gradients(A_ , model.trainable_variables )
return gradients
UpperCamelCase : Union[str, Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(A_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase : str = timeit.repeat(
A_ , repeat=self.args.repeat , number=10 , )
return min(A_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCamelCase : Dict = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCamelCase : Any = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase : List[Any] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase : int = nvml.nvmlDeviceGetMemoryInfo(A_ )
UpperCamelCase : Tuple = meminfo.used
UpperCamelCase : List[str] = Memory(A_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCamelCase : Optional[Any] = None
else:
UpperCamelCase : List[str] = measure_peak_memory_cpu(A_ )
UpperCamelCase : Dict = Memory(A_ ) if isinstance(A_ , A_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase : int = stop_memory_tracing(A_ )
if memory is None:
UpperCamelCase : str = summary.total
else:
UpperCamelCase : List[Any] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 52
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 41
| 0
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a__ : Any =logging.getLogger(__name__)
a__ : Optional[int] =list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a__ : Dict =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__lowerCamelCase )} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "The input training data file (a text file)."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__lowerCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__lowerCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
SCREAMING_SNAKE_CASE_ : bool =field(default=__lowerCamelCase , metadata={"help": "Whether ot not to use whole word mask."} )
SCREAMING_SNAKE_CASE_ : float =field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
SCREAMING_SNAKE_CASE_ : float =field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
SCREAMING_SNAKE_CASE_ : int =field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
SCREAMING_SNAKE_CASE_ : int =field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase__ ( __lowercase : DataTrainingArguments , __lowercase : PreTrainedTokenizer , __lowercase : bool = False , __lowercase : Optional[str] = None , ) -> Any:
"""simple docstring"""
def _dataset(__lowercase : Tuple , __lowercase : int=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , ref_path=__lowercase , )
return LineByLineTextDataset(tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__UpperCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
__UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__UpperCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
__UpperCamelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
__UpperCamelCase = AutoModelWithLMHead.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
__UpperCamelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__UpperCamelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__UpperCamelCase = (
get_dataset(__lowercase , tokenizer=__lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__UpperCamelCase = (
get_dataset(__lowercase , tokenizer=__lowercase , evaluate=__lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__UpperCamelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=__lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__UpperCamelCase = DataCollatorForWholeWordMask(
tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
else:
__UpperCamelCase = DataCollatorForLanguageModeling(
tokenizer=__lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCamelCase = Trainer(
model=__lowercase , args=__lowercase , data_collator=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , prediction_loss_only=__lowercase , )
# Training
if training_args.do_train:
__UpperCamelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = math.exp(eval_output['eval_loss'] )
__UpperCamelCase = {'perplexity': perplexity}
__UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(__lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __lowercase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(__lowercase )
return results
def lowercase__ ( __lowercase : Tuple ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 53
|
def solution(n: int = 1000) -> int:
    """
    Returns the product a * b * c of the Pythagorean triplet (a, b, c)
    satisfying a**2 + b**2 == c**2 and a + b + c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
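
# Worked example (not in the original file): for n = 12 the only triplet is
# (3, 4, 5), since 3**2 + 4**2 == 5**2 and 3 + 4 + 5 == 12, so
# solution(12) == 3 * 4 * 5 == 60.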
| 41
| 0
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if number > 0:
raise ValueError("input must be a negative integer" )
__SCREAMING_SNAKE_CASE = len(bin(lowerCAmelCase_ )[3:] )
__SCREAMING_SNAKE_CASE = bin(abs(lowerCAmelCase_ ) - (1 << binary_number_length) )[3:]
__SCREAMING_SNAKE_CASE = (
(
"1"
+ "0" * (binary_number_length - len(lowerCAmelCase_ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
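
# Cross-check sketch (not in the original file): with the minimal width used
# above (bit length of the magnitude plus a sign bit), the result matches the
# usual (1 << width) + number formula; e.g. for -5 the width is 4 and
# bin((1 << 4) - 5) == "0b1011" == twos_complement(-5).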
| 54
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict=13 , UpperCamelCase__: int=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Tuple=10 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: str=0.6 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : List[str] = mask_ratio
lowerCamelCase__ : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Any ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel(config=UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
lowerCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : Union[str, Any] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase__ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = config_and_inputs
lowerCamelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = TFViTMAEModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : str = outputs_dict[0].numpy()
lowerCamelCase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCamelCase_ ( self: Dict ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__: int ):
lowerCamelCase__ : Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = v.numpy()
else:
lowerCamelCase__ : Union[str, Any] = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = prepare_numpy_arrays(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : Any = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: str ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
lowerCamelCase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ : List[str] = main_layer_class(UpperCamelCase__ )
lowerCamelCase__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ : List[str] = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
lowerCamelCase__ : int = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : Any = outputs.last_hidden_state.numpy()
lowerCamelCase__ : List[str] = 0
else:
lowerCamelCase__ : int = outputs.logits.numpy()
lowerCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : Dict = model_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : str = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ : Optional[Any] = 0
else:
lowerCamelCase__ : Union[str, Any] = after_outputs["""logits"""].numpy()
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
def lowerCamelCase_ ( self: Any ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
lowerCamelCase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ : int = model_class.from_config(model.config )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[Any] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: List[str] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ : Tuple = ViTMAEConfig()
lowerCamelCase__ : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ : str = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Any = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : str = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 41
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=UpperCamelCase , )
assert hasattr(self , "env" )
def snake_case ( self , UpperCamelCase=1 ):
"""simple docstring"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
TrainingJobAnalytics(UpperCamelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
def snake_case ( self ):
"""simple docstring"""
# create estimator
lowerCamelCase_ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowerCamelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCamelCase )
| 55
|
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
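# Minimal usage sketch (illustrative only; PrefixSum is the class above):
#     ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#     ps.get_sum(1, 3)               # -> 9, i.e. 2 + 3 + 4
#     ps.contains_sum(5)             # -> True, from the subarray [2, 3]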
| 41
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a ( unittest.TestCase ):
def A_ ( self : List[str] ):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ )
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case_ = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = tokenizer.decode(greedy_ids[0] )
snake_case_ = TextIteratorStreamer(lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
snake_case_ = Thread(target=model.generate , kwargs=lowercase_ )
thread.start()
snake_case_ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowercase_ , lowercase_ )
def A_ ( self : Dict ):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ )
snake_case_ = greedy_ids[:, input_ids.shape[1] :]
snake_case_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ , skip_prompt=lowercase_ )
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case_ = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_ )
def A_ ( self : List[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case_ = AutoTokenizer.from_pretrained('''distilgpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = torch.ones((1, 5) , device=lowercase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case_ = TextStreamer(lowercase_ , skip_special_tokens=lowercase_ )
model.generate(lowercase_ , max_new_tokens=1 , do_sample=lowercase_ , streamer=lowercase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case_ = cs.out[:-1] # Remove the final "\n"
snake_case_ = tokenizer(lowercase_ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def A_ ( self : str ):
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
snake_case_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowercase_ )
snake_case_ = -1
snake_case_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowercase_ )
snake_case_ = TextIteratorStreamer(lowercase_ , timeout=0.001 )
snake_case_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
snake_case_ = Thread(target=model.generate , kwargs=lowercase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowercase_ ):
snake_case_ = ''''''
for new_text in streamer:
streamer_text += new_text
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =BlenderbotSmallConfig
__UpperCAmelCase : Tuple ={}
__UpperCAmelCase : Dict ="""gelu"""
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
def snake_case ( self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCAmelCase = prepare_blenderbot_small_inputs_dict(__a , __a , __a )
return config, inputs_dict
def snake_case ( self , __a , __a ):
__lowerCAmelCase = TFBlenderbotSmallModel(config=__a ).get_decoder()
__lowerCAmelCase = inputs_dict["input_ids"]
__lowerCAmelCase = input_ids[:1, :]
__lowerCAmelCase = inputs_dict["attention_mask"][:1, :]
__lowerCAmelCase = inputs_dict["head_mask"]
__lowerCAmelCase = 1
# first forward pass
__lowerCAmelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
__lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase = model(__a , attention_mask=__a )[0]
__lowerCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
__lowerCAmelCase = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowerCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCAmelCase : Dict =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : int =(
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : str =True
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : Dict =False
def snake_case ( self ):
__lowerCAmelCase = TFBlenderbotSmallModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_tokenizers
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] =[
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
__UpperCAmelCase : Optional[int] ="""facebook/blenderbot_small-90M"""
@cached_property
def snake_case ( self ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def snake_case ( self ):
__lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer(self.src_text , return_tensors="tf" )
__lowerCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
__lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 57
|
'''simple docstring'''
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
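# Note (illustrative, not in the original file): the last call raises
# ValueError because "Foo" is not reachable from source vertex "G". A hedged
# sketch of guarding it:
#     try:
#         print(g.shortest_path("Foo"))
#     except ValueError as err:
#         print(err)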
| 41
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase_ = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 58
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
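# Worked check (illustrative): both evaluations above agree, since
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 == 500.0 + 9300.0 + 70000.0 == 79800.0.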
| 41
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = tempfile.mkdtemp()
snake_case : Tuple = BlipImageProcessor()
snake_case : Optional[int] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
snake_case : Dict = BlipaProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : str , **snake_case__ : int ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def _SCREAMING_SNAKE_CASE (self : str , **snake_case__ : Any ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case : Tuple = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : Dict = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : Optional[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : Optional[int] = image_processor(snake_case__ , return_tensors="np" )
snake_case : List[Any] = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Union[str, Any] = "lower newer"
snake_case : int = processor(text=snake_case__ )
snake_case : List[str] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : str = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : Dict = processor.batch_decode(snake_case__ )
snake_case : str = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Optional[int] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = "lower newer"
snake_case : int = self.prepare_image_inputs()
snake_case : Dict = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 59
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Any ='''tf'''
else:
_A : List[str] ='''jax'''
class _lowercase ( _lowercase , unittest.TestCase ):
a = ByTaTokenizer
a = False
def lowerCamelCase_ ( self: str ):
super().setUp()
lowerCamelCase__ : str = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Any=False , UpperCamelCase__: Union[str, Any]=20 , UpperCamelCase__: Optional[int]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCamelCase__ : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
try:
lowerCamelCase__ : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) )
lowerCamelCase__ : Tuple = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowerCamelCase__ : Dict = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowerCamelCase__ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowerCamelCase__ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowerCamelCase__ : str = """ """ + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : Union[str, Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : Optional[int] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = """Unicode €."""
lowerCamelCase__ : List[Any] = tokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
lowerCamelCase__ : List[Any] = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCamelCase__ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : int = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )
def lowerCamelCase_ ( self: Optional[int] ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : int = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCamelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCamelCase__ : Dict = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 41
| 0
|
"""simple docstring"""
def gray_code(bit_count: int) -> list[int]:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list[str]:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
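# A quick sanity check of the functions above (names as reconstructed here):
# in a Gray code, consecutive values differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code(2))  # [0, 1, 3, 2] -> 00, 01, 11, 10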
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
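# A minimal usage sketch for the grid search above (function name assumed from
# the reconstruction): 1 marks a free cell, 0 a blocked one, every move costs 1.
if __name__ == "__main__":
    example_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    dist, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(dist, path)  # 4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]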
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
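# Illustrative shape of the mapping returned by the call above (field values
# made up; one entry per fetched post, keyed by its index):
# {0: {"title": "Ask Anything Monday", "url": "https://...", "selftext": "..."}}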
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
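# The block above is the lazy-import pattern used across transformers: heavy
# submodules are only imported when one of their attributes is first accessed.
# A minimal, self-contained sketch of the same idea (class and names here are
# illustrative, not part of the transformers API):
import importlib
import types


class LazyNamespace(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # maps submodule name -> list of public attributes it provides
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        for submodule, exports in self._import_structure.items():
            if item in exports:
                # import the heavy submodule only on first access
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")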
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
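# Example invocation of the conversion script above, assuming it is saved under
# a name like convert_conditional_detr_checkpoint.py (the output directory is
# arbitrary):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50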
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
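# Spot checks for the digit-reversal logic above (function name assumed from
# the reconstruction): the number is reversed digit by digit and compared to
# the saved copy; negative numbers are never palindromes here.
if __name__ == "__main__":
    assert is_palindrome(121) and is_palindrome(0)
    assert not is_palindrome(123) and not is_palindrome(-121)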
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
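# fire exposes the function above as a CLI; a hypothetical invocation (the file
# name is illustrative, and extra flags are forwarded as config overrides):
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model 64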
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
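# A short standalone round-trip of the trie above, showing that deleting a word
# keeps longer words that share its prefix intact (the "r" node survives because
# it still has the "d" child):
if __name__ == "__main__":
    trie = TrieNode()
    trie.insert_many(["car", "card"])
    assert trie.find("car") and trie.find("card")
    trie.delete("car")
    assert not trie.find("car") and trie.find("card")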
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''Yield the Fibonacci numbers 1, 2, 3, 5, 8, ...'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    '''Return the index of the first Fibonacci number with n digits.'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
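# Spot check (names as reconstructed above): the first 3-digit Fibonacci number
# is 144 = F(12), so solution(3) should return 12.
if __name__ == "__main__":
    assert solution(3) == 12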
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and instantiate the right head model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: logs only on the main
    process unless told otherwise.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check if the log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking whether we should log;
        `main_process_only` and `in_order` are popped from kwargs first.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''', True)
        in_order = kwargs.pop('''in_order''', False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """Returns a `logging.Logger` adapter for `name` that can handle multiprocessing."""
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
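# A usage sketch for the utility above (mirrors the accelerate documentation;
# the Accelerator import assumes the full accelerate package is installed):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed on every process", main_process_only=False)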
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")

mock_tokenizer_config = {"""target_lang""": """fi""", """source_lang""": """en"""}
zh_code = """>>zh<<"""
ORG_NAME = """Helsinki-NLP/"""

if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
A__ = "Tämä on testi"
A__ = "This is a test"
A__ = [76, 7, 2047, 2]
A__ = [69, 12, 11, 940, 2]
A__ = tokenizer(lowercase ).input_ids
self.assertListEqual(lowercase , lowercase )
A__ = tokenizer(text_target=lowercase ).input_ids
self.assertListEqual(lowercase , lowercase )
A__ = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
| 68
|
'''simple docstring'''
_A : Union[str, Any] =range(2, 20 + 1)
_A : List[str] =[10**k for k in range(ks[-1] + 1)]
_A : dict[int, dict[int, list[list[int]]]] ={}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) )
lowerCamelCase__ : int = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) )
lowerCamelCase__ , lowerCamelCase__ : Dict = 0, 0
lowerCamelCase__ : List[str] = n - i
lowerCamelCase__ : Optional[Any] = memo.get(UpperCamelCase )
if sub_memo is not None:
lowerCamelCase__ : str = sub_memo.get(UpperCamelCase )
if jumps is not None and len(UpperCamelCase ) > 0:
# find and make the largest jump without going over
lowerCamelCase__ : Optional[Any] = -1
for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCamelCase__ : Dict = _k
break
if max_jump >= 0:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCamelCase__ : Dict = diff + c
for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = divmod(UpperCamelCase , 10 )
if new_c > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
lowerCamelCase__ : Any = []
else:
lowerCamelCase__ : str = {c: []}
lowerCamelCase__ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCamelCase__ , lowerCamelCase__ : Dict = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
lowerCamelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCamelCase__ : List[Any] = 0
while j < len(UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCamelCase__ : Optional[Any] = i
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 0, 0
for j in range(len(UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCamelCase__ : Optional[int] = ds_c + ds_b
diff += addend
lowerCamelCase__ : int = 0
for j in range(UpperCamelCase ):
lowerCamelCase__ : str = a_i[j] + addend
lowerCamelCase__ , lowerCamelCase__ : int = divmod(UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
lowerCamelCase__ : List[Any] = digits[j] + addend
if s >= 10:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = divmod(UpperCamelCase , 10 )
lowerCamelCase__ : Any = addend // 10 + quotient
else:
lowerCamelCase__ : Any = s
lowerCamelCase__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowerCamelCase__ , lowerCamelCase__ : Any = divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 10**15 ) -> int:
lowerCamelCase__ : Any = [1]
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Tuple = 0
while True:
lowerCamelCase__ , lowerCamelCase__ : Any = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
lowerCamelCase__ : Union[str, Any] = 0
for j in range(len(UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
__UpperCamelCase = {'''bert_for_seq_generation''': 512}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<::::>", lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None:
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase__)
@property
def a_ ( self) -> Tuple:
return self.sp_model.get_piece_size()
def a_ ( self) -> Optional[Any]:
snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self, lowerCAmelCase__) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
return self.sp_model.piece_to_id(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Tuple:
snake_case_ = self.sp_model.IdToPiece(lowerCAmelCase__)
return token
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = []
snake_case_ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__) + token
snake_case_ = []
else:
current_sub_tokens.append(lowerCAmelCase__)
out_string += self.sp_model.decode(lowerCAmelCase__)
return out_string.strip()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
| 69
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
return abs(UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
while y: # --> when y=0 then loop will terminate and return x as final GCD.
lowerCamelCase__ , lowerCamelCase__ : Tuple = y, x % y
return abs(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ () -> Tuple:
try:
lowerCamelCase__ : Dict = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
lowerCamelCase__ : Any = int(nums[0] )
lowerCamelCase__ : Optional[Any] = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(UpperCamelCase , UpperCamelCase )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase , UpperCamelCase )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 41
| 0
|
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return EnvironmentCommand()
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase ( snake_case_ ):
@staticmethod
def lowercase__ ( __snake_case : ArgumentParser ) -> Optional[Any]:
_lowerCAmelCase = parser.add_parser("""env""" )
download_parser.set_defaults(func=__snake_case )
download_parser.add_argument(
"""--accelerate-config_file""" , default=__snake_case , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=__snake_case )
def __init__( self : Dict , __snake_case : Optional[Any] , *__snake_case : Dict ) -> None:
_lowerCAmelCase = accelerate_config_file
def lowercase__ ( self : Optional[int] ) -> Dict:
_lowerCAmelCase = """not installed"""
if is_safetensors_available():
import safetensors
_lowerCAmelCase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_lowerCAmelCase = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = _lowerCAmelCase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowerCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__snake_case ):
_lowerCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowerCAmelCase = (
"""\n""".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(__snake_case , __snake_case )
else f"\t{accelerate_config}"
)
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_torch_available():
import torch
_lowerCAmelCase = torch.__version__
_lowerCAmelCase = torch.cuda.is_available()
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase = tf.__version__
try:
# deprecated in v2.1
_lowerCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowerCAmelCase = bool(tf.config.list_physical_devices("""GPU""" ) )
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """not installed"""
_lowerCAmelCase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_lowerCAmelCase = flax.__version__
_lowerCAmelCase = jax.__version__
_lowerCAmelCase = jaxlib.__version__
_lowerCAmelCase = jax.lib.xla_bridge.get_backend().platform
_lowerCAmelCase = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f"{safetensors_version}",
"""Accelerate version""": f"{accelerate_version}",
"""Accelerate config""": f"{accelerate_config_str}",
"""PyTorch version (GPU?)""": f"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": f"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": f"{flax_version} ({jax_backend})",
"""Jax version""": f"{jax_version}",
"""JaxLib version""": f"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(__snake_case ) )
return info
@staticmethod
def lowercase__ ( __snake_case : str ) -> Dict:
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 70
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[int] = min_resolution
lowerCamelCase__ : Optional[Any] = max_resolution
lowerCamelCase__ : Union[str, Any] = do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20}
lowerCamelCase__ : Dict = do_thumbnail
lowerCamelCase__ : Optional[int] = do_align_axis
lowerCamelCase__ : Any = do_pad
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase ( _lowercase , unittest.TestCase ):
a = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def lowerCamelCase_ ( self: List[str] ):
pass
@is_flaky()
def lowerCamelCase_ ( self: Union[str, Any] ):
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Optional[int] ):
# Initialize image_processing
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Dict ):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 41
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ :Tuple = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def A ( a_ ,a_ ) -> Dict:
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def A ( a_ ) -> Dict:
__UpperCamelCase : Optional[int] =_TestCommandArgs(dataset=a_ ,all_configs=a_ ,save_infos=a_ )
__UpperCamelCase : Any =TestCommand(*a_ )
test_command.run()
__UpperCamelCase : List[str] =os.path.join(a_ ,'README.md' )
assert os.path.exists(a_ )
__UpperCamelCase : Any =DatasetInfosDict.from_directory(a_ )
__UpperCamelCase : int =DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) ,splits=[
{
'name': 'train',
'num_bytes': 2_351_563,
'num_examples': 10_000,
},
{
'name': 'validation',
'num_bytes': 238_418,
'num_examples': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
__UpperCamelCase , __UpperCamelCase : Optional[Any] =getattr(dataset_infos['default'] ,a_ ), getattr(expected_dataset_infos['default'] ,a_ )
if key == "num_bytes":
assert is_apercent_close(a_ ,a_ )
elif key == "splits":
assert list(a_ ) == list(a_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 71
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_A : Dict ={'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ = get_logger(__name__)
def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : List[str], A_ : Tuple, A_ : List[Any]=0 ):
'''simple docstring'''
os.makedirs(A_, exist_ok=A_ )
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase : Union[str, Any] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
_lowerCamelCase : Tuple = os.path.join(A_, A_ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_, A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase : Optional[Any] = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_lowerCamelCase : int = os.path.join(A_, A_ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_, A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase : List[Any] = os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(A_, exist_ok=A_ )
logger.info(F'''Saving model to {ckpt_dir}''' )
_lowerCamelCase : Tuple = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=A_, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), )
logger.info(F'''Model saved to {ckpt_dir}''' )
def snake_case_ ( A_ : Optional[int], A_ : Union[str, Any], A_ : List[Any], A_ : Union[str, Any], A_ : Optional[int]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
_lowerCamelCase : int = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
_lowerCamelCase : List[Any] = os.path.join(A_, A_ )
logger.info(F'''Loading model from {input_model_file}''' )
_lowerCamelCase : Optional[int] = torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase : Any = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_lowerCamelCase : Tuple = os.path.join(A_, A_ )
logger.info(F'''Loading model from {input_model_file}''' )
_lowerCamelCase : Any = torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase : List[str] = (
os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
_lowerCamelCase : Tuple = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A_, storage_reader=dist_cp.FileSystemReader(A_ ), planner=DefaultLoadPlanner(), )
_lowerCamelCase : Optional[int] = state_dict['''model''']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(A_ )
def snake_case_ ( A_ : Any, A_ : Union[str, Any], A_ : int, A_ : Optional[Any], A_ : int, A_ : Tuple=0 ):
'''simple docstring'''
os.makedirs(A_, exist_ok=A_ )
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase : int = FSDP.optim_state_dict(A_, A_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowerCamelCase : Any = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_lowerCamelCase : List[Any] = os.path.join(A_, A_ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(A_, A_ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
_lowerCamelCase : Dict = os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(A_, exist_ok=A_ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def snake_case_ ( A_ : Tuple, A_ : Dict, A_ : Any, A_ : Optional[int], A_ : List[str], A_ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase : Any = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowerCamelCase : Dict = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_lowerCamelCase : Any = os.path.join(A_, A_ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
_lowerCamelCase : List[Any] = torch.load(A_ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
_lowerCamelCase : List[str] = (
os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
_lowerCamelCase : int = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(A_ ), )
_lowerCamelCase : List[str] = optim_state['''optimizer''']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
_lowerCamelCase : List[str] = FSDP.optim_state_dict_to_load(A_, A_, A_ )
optimizer.load_state_dict(A_ )
| 72
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 41
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
a =logging.getLogger(__name__)
@dataclass
class A_ :
_UpperCAmelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class A_ :
_UpperCAmelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_UpperCAmelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase ( self : str):
if self.train_file is not None:
__lowerCamelCase : str = self.train_file.split('.')[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__lowerCamelCase : List[str] = self.validation_file.split('.')[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A_ :
_UpperCAmelCase : PreTrainedTokenizerBase
_UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = None
def __call__( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Union[str, Any] = 'label' if 'label' in features[0].keys() else 'labels'
__lowerCamelCase : Any = [feature.pop(SCREAMING_SNAKE_CASE__) for feature in features]
__lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = len(features[0]['input_ids'])
__lowerCamelCase : Optional[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(SCREAMING_SNAKE_CASE__)] for feature in features
]
__lowerCamelCase : List[Any] = list(chain(*SCREAMING_SNAKE_CASE__))
__lowerCamelCase : List[str] = self.tokenizer.pad(
SCREAMING_SNAKE_CASE__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
__lowerCamelCase : Optional[int] = {k: v.view(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,-1) for k, v in batch.items()}
# Add back labels
__lowerCamelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ ,dtype=torch.intaa)
return batch
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowerCamelCase__ , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCamelCase : int = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowerCamelCase : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCamelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__lowerCamelCase : Union[str, Any] = {}
if data_args.train_file is not None:
__lowerCamelCase : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
__lowerCamelCase : List[str] = data_args.validation_file
__lowerCamelCase : Optional[Any] = data_args.train_file.split('.' )[-1]
__lowerCamelCase : Optional[Any] = load_dataset(
lowerCamelCase__ , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__lowerCamelCase : Optional[Any] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCamelCase : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__lowerCamelCase : Optional[Any] = [F"ending{i}" for i in range(4 )]
__lowerCamelCase : int = 'sent1'
__lowerCamelCase : Optional[Any] = 'sent2'
if data_args.max_seq_length is None:
__lowerCamelCase : List[Any] = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__lowerCamelCase : Union[str, Any] = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__lowerCamelCase : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase__ ):
__lowerCamelCase : Any = [[context] * 4 for context in examples[context_name]]
__lowerCamelCase : Dict = examples[question_header_name]
__lowerCamelCase : List[str] = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(lowerCamelCase__ )
]
# Flatten out
__lowerCamelCase : Any = list(chain(*lowerCamelCase__ ) )
__lowerCamelCase : List[str] = list(chain(*lowerCamelCase__ ) )
# Tokenize
__lowerCamelCase : List[Any] = tokenizer(
lowerCamelCase__ , lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
__lowerCamelCase : Dict = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 73
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_A : Union[str, Any] =False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 41
| 0
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`; if `deriv` is True, return its derivative instead."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
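# Quick sanity checks of the helper above (illustrative, not part of the original script):
assert sigmoid_function(0.0) == 0.5
assert sigmoid_function(0.5, True) == 0.25  # derivative form: 0.5 * (1 - 0.5)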
# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight in [1, 199]
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 74
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 41
| 0
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 75
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
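# Known small instance (illustrative cross-check, not from the source): for n == 12 the only
# Pythagorean triplet is (3, 4, 5), so solution(12) returns 3 * 4 * 5 == 60.
assert solution(12) == 60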
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 0
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 76
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
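        # Worked example with the defaults above (illustrative arithmetic): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, and with mask_ratio=0.6 the expected
        # sequence length is ceil(0.4 * 226) = 91 visible tokens (the +1 is the [CLS] token).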
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : str = outputs_dict[0].numpy()
lowerCamelCase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCamelCase_ ( self: Dict ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__: int ):
lowerCamelCase__ : Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = v.numpy()
else:
lowerCamelCase__ : Union[str, Any] = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = prepare_numpy_arrays(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : Any = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: str ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
lowerCamelCase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ : List[str] = main_layer_class(UpperCamelCase__ )
lowerCamelCase__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ : List[str] = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
lowerCamelCase__ : int = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : Any = outputs.last_hidden_state.numpy()
lowerCamelCase__ : List[str] = 0
else:
lowerCamelCase__ : int = outputs.logits.numpy()
lowerCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : Dict = model_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : str = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ : Optional[Any] = 0
else:
lowerCamelCase__ : Union[str, Any] = after_outputs["""logits"""].numpy()
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
def lowerCamelCase_ ( self: Any ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
lowerCamelCase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ : int = model_class.from_config(model.config )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 41
| 0
|
"""simple docstring"""
import random
def _partition(data: list, pivot):
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the `index`-th smallest element of `items` (0-indexed)."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
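# Minimal usage sketch (illustrative values, not from the source):
if __name__ == "__main__":
    # the 0-indexed 5th-smallest element of the list below is 54
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))  # -> 54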
| 77
|
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
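    # Quick illustrative check (hypothetical values, not part of the original module):
    prefix_sum = PrefixSum([1, 2, 3, 4])
    assert prefix_sum.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert prefix_sum.contains_sum(7)  # the subarray [3, 4] sums to 7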
| 41
| 0
|
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list, weight: list, capacity: int) -> tuple[float, list]:
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
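# Classic illustrative instance (not from the source): values [60, 100, 120],
# weights [10, 20, 30], capacity 50 -> (240.0, [1, 1, 0.6666666666666666]),
# i.e. the first two items are taken whole and two thirds of the third.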
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
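# Illustrative check (not part of the original script): the first three terms.
assert harmonic_series("3") == ["1", "1/2", "1/3"]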
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 79
|
'''simple docstring'''
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
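# Traced behavior of the demo below (derived from the graph above): BFS from "G" yields
# shortest_path("D") == "G->C->A->B->D" and shortest_path("G") == "G", while
# shortest_path("Foo") raises ValueError because "Foo" is never reached by the search.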
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 41
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 80
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial directly, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
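    # Both calls print 79800.0 for this polynomial (worked arithmetic as a cross-check of
    # the two strategies): 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000.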
| 41
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to
    whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
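# Illustrative mapping (standard GPT-2 byte-encoder behavior): printable bytes map to
# themselves, e.g. ord("a") -> "a", while the space byte (32) maps to "Ġ" (U+0120).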
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
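# Example (illustrative): get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}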
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, based on byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 81
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
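        # ByT5 appends the EOS token automatically, so spelling "</s>" out in
        # the input text must yield exactly the same ids as leaving it off.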
lowerCamelCase__ : Union[str, Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : Optional[int] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = """Unicode €."""
lowerCamelCase__ : List[Any] = tokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
lowerCamelCase__ : List[Any] = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
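        # Expected ids for the first sentence: one id per UTF-8 byte (+3
        # offset), then </s> (1) and a single pad (0) to match the longer
        # second sentence.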
lowerCamelCase__ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : int = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )
def lowerCamelCase_ ( self: Optional[int] ):
        # sanity check on the model_max_length default so the test below is meaningful
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : int = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCamelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())  # ByT5 tokenizers have no vocab file
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
        # The default common tokenizer tests use tokens that are invalid for ByT5,
        # which only accepts one-character strings and special added tokens.
lowerCamelCase__ : Dict = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 41
| 0
|
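# Gas-law helpers: the first function converts molarity to normality
# (normality = molarity * n-factor); the other three rearrange the ideal gas
# law PV = nRT with R = 0.0821 L*atm/(mol*K) to solve for pressure, volume
# and temperature. Every result is rounded to the nearest integer.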
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return round(float(moles / volume ) * nfactor )
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
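# Dijkstra's algorithm on a 2D grid: cells with value 1 are walkable, each
# step costs 1, and `allow_diagonal` adds the four diagonal moves. A
# predecessor grid is kept so the shortest path can be reconstructed once the
# destination is popped from the heap.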
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = grid.shape
lowerCamelCase__ : List[str] = [-1, 1, 0, 0]
lowerCamelCase__ : Dict = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ , lowerCamelCase__ : Any = [(0, source)], set()
lowerCamelCase__ : Tuple = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[int] = np.empty((rows, cols) , dtype=UpperCamelCase )
lowerCamelCase__ : str = None
while queue:
((lowerCamelCase__) , (lowerCamelCase__)) : List[str] = heappop(UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Optional[int] = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = predecessors[x, y]
path.append(UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCamelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Any = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Union[str, Any] = dist + 1
lowerCamelCase__ : List[str] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = XLMTokenizer
lowercase__ = False
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_UpperCamelCase : int = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
_UpperCamelCase : Dict = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
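        # fastBPE-style merge rules, one "left right count" triple per line;
        # the tokenizer only reads the first two fields, so the counts here
        # are arbitrary.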
_UpperCamelCase : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = 'lower newer'
_UpperCamelCase : Dict = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = XLMTokenizer(self.vocab_file ,self.merges_file )
_UpperCamelCase : List[str] = 'lower'
_UpperCamelCase : int = ['low', 'er</w>']
_UpperCamelCase : int = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = tokens + ['<unk>']
_UpperCamelCase : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : Dict = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
_UpperCamelCase : List[Any] = tokenizer.encode('sequence builders' ,add_special_tokens=lowerCamelCase__ )
_UpperCamelCase : Optional[int] = tokenizer.encode('multi-sequence build' ,add_special_tokens=lowerCamelCase__ )
_UpperCamelCase : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 83
|
'''simple docstring'''
from __future__ import annotations
import requests
_A : str =set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = "new" , UpperCamelCase = None ) -> dict:
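    # Fetch up to `limit` posts from a subreddit listing (e.g. "new") and keep
    # only the fields named in `wanted_data` (or the raw posts when none are
    # given); every requested field must be one of `valid_terms` above.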
lowerCamelCase__ : Any = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(UpperCamelCase ) - valid_terms ) ):
lowerCamelCase__ : str = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(UpperCamelCase )
lowerCamelCase__ : str = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 429:
raise requests.HTTPError
lowerCamelCase__ : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(UpperCamelCase )}
lowerCamelCase__ : Dict = {}
for id_ in range(UpperCamelCase ):
lowerCamelCase__ : Union[str, Any] = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get HTTP error 429, you are being rate limited. Try again after some time.
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 41
| 0
|
"""simple docstring"""
from math import isclose, sqrt
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple[float, float, float]:
'''simple docstring'''
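    # On the ellipse 4x^2 + y^2 = 100 the tangent slope at (x, y) is -4x/y, so
    # the normal slope is y/(4x). Below, sa = sin(2*theta) and ca = cos(2*theta)
    # for theta = atan(normal_gradient), and the reflected (outgoing) gradient
    # is tan(2*theta - atan(incoming_gradient)).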
lowerCAmelCase_ :Optional[int] = point_y / 4 / point_x
lowerCAmelCase_ :Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase_ :Union[str, Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase_ :str = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase_ :Tuple = outgoing_gradient**2 + 4
lowerCAmelCase_ :Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase_ :str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
lowerCAmelCase_ :Optional[Any] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase_ :Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase_ :List[Any] = x_minus if isclose(lowercase__ , lowercase__ ) else x_plus
lowerCAmelCase_ :List[str] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _snake_case ( lowercase__ : float = 1.4 , lowercase__ : float = -9.6 ) -> int:
'''simple docstring'''
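    # Fire the beam from (0.0, 10.1) towards (first_x_coord, first_y_coord)
    # and count reflections until it escapes through the gap
    # -0.01 <= x <= 0.01 at the top of the ellipse (Project Euler 144).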
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :float = first_x_coord
lowerCAmelCase_ :float = first_y_coord
lowerCAmelCase_ :float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_point(lowercase__ , lowercase__ , lowercase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_A : Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowerCamelCase__ : Optional[int] = value
else:
lowerCamelCase__ : Any = value
return new_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Dict:
lowerCamelCase__ : Optional[int] = """"""
if is_panoptic:
lowerCamelCase__ : Dict = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase__ : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
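        # d_model is 256 for these checkpoints, so the fused in_proj matrix is
        # split into three 256-row blocks.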
lowerCamelCase__ : int = in_proj_weight[:256, :]
lowerCamelCase__ : Any = in_proj_bias[:256]
lowerCamelCase__ : str = in_proj_weight[256:512, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[256:512]
lowerCamelCase__ : Dict = in_proj_weight[-256:, :]
lowerCamelCase__ : str = in_proj_bias[-256:]
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase__ : Any = """resnet101"""
if "dc5" in model_name:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : int = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase__ : List[str] = 250
else:
lowerCamelCase__ : int = 91
lowerCamelCase__ : int = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """coco-detection-id2label.json"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase )
# prepare image
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval()
lowerCamelCase__ : Dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Tuple = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_A : Optional[Any] =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 41
| 0
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
_SCREAMING_SNAKE_CASE : List[Any] = "naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = DonutProcessor.from_pretrained(a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
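        # The XML-like tag sequence below should parse back into the dict
        # above: each key becomes a <s_key>...</s_key> span and list items are
        # joined with <sep/>.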
snake_case_ = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
snake_case_ = self.processor.tokenajson(a__ )
self.assertDictEqual(a__ , a__ )
| 85
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
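        # Build a single random 30x400 RGB image (channels-first array
        # converted to PIL) to serve as the vision input in the tests below.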
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 41
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
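# Standard lazy-import layout: submodules are only materialised on first
# attribute access, and the vision / torch pieces are skipped when their
# optional dependencies are missing.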
lowerCamelCase__ = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""PerceiverFeatureExtractor"""]
lowerCamelCase__ = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 86
|
'''simple docstring'''
class _lowercase :
def __init__( self: Optional[Any] ):
lowerCamelCase__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
lowerCamelCase__ : List[str] = False
def lowerCamelCase_ ( self: str , UpperCamelCase__: list[str] ):
for word in words:
self.insert(UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: str ):
lowerCamelCase__ : List[Any] = self
for char in word:
if char not in curr.nodes:
lowerCamelCase__ : Tuple = TrieNode()
lowerCamelCase__ : List[Any] = curr.nodes[char]
lowerCamelCase__ : Any = True
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str ):
lowerCamelCase__ : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
lowerCamelCase__ : Any = curr.nodes[char]
return curr.is_leaf
def lowerCamelCase_ ( self: str , UpperCamelCase__: str ):
def _delete(UpperCamelCase__: TrieNode , UpperCamelCase__: str , UpperCamelCase__: int ) -> bool:
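            # Recurse to the final character of the word, unmark its leaf
            # flag, and report back whether the visited node has become
            # childless and can be pruned by its parent.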
if index == len(UpperCamelCase__ ):
# If word does not exist
if not curr.is_leaf:
return False
lowerCamelCase__ : str = False
return len(curr.nodes ) == 0
lowerCamelCase__ : List[str] = word[index]
lowerCamelCase__ : Dict = curr.nodes.get(UpperCamelCase__ )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
lowerCamelCase__ : List[Any] = _delete(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , UpperCamelCase__ , 0 )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> None:
if node.is_leaf:
print(UpperCamelCase , end=""" """ )
for key, value in node.nodes.items():
print_words(UpperCamelCase , word + key )
def SCREAMING_SNAKE_CASE_ () -> bool:
lowerCamelCase__ : str = """banana bananas bandana band apple all beast""".split()
lowerCamelCase__ : Union[str, Any] = TrieNode()
root.insert_many(UpperCamelCase )
# print_words(root, "")
assert all(root.find(UpperCamelCase ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> None:
print(str(UpperCamelCase ) , """works!""" if passes else """doesn't work :(""" )
def SCREAMING_SNAKE_CASE_ () -> None:
assert test_trie()
def SCREAMING_SNAKE_CASE_ () -> None:
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 41
| 0
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowercase_ ( _lowerCamelCase : str = "laptop"):
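    # Scrape the first page of Amazon.in search results for `product` and
    # collect title, link, price, rating, MRP and the percentage discount into
    # a pandas DataFrame.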
lowercase__ : Optional[Any] = f'''https://www.amazon.in/laptop/s?k={product}'''
lowercase__ : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
lowercase__ : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase).text)
# Initialize a Pandas dataframe with the column titles
lowercase__ : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"}) , ):
try:
lowercase__ : List[str] = item.ha.text
lowercase__ : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
lowercase__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"}).text
try:
lowercase__ : Tuple = item.find("span" , attrs={"class": "a-icon-alt"}).text
except AttributeError:
lowercase__ : List[Any] = "Not available"
try:
lowercase__ : int = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
)
except AttributeError:
lowercase__ : Any = ""
try:
lowercase__ : Optional[Any] = float(
(
(
float(product_mrp.strip("₹").replace("," , ""))
- float(product_price.strip("₹").replace("," , ""))
)
/ float(product_mrp.strip("₹").replace("," , ""))
)
* 100)
except ValueError:
lowercase__ : Dict = float("nan")
except AttributeError:
pass
lowercase__ : Tuple = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase__ : str = " "
lowercase__ : Dict = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase = '''headphones'''
get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 87
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False ) -> Union[str, Any]:
lowerCamelCase__ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
lowerCamelCase__ : str = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : List[str] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : str = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
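        # the fused qkv matrix has shape (3 * hidden_size, hidden_size); slice
        # it into equal thirds for the separate HF q/k/v projections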
lowerCamelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : List[str] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
lowerCamelCase__ : Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
lowerCamelCase__ : int = dct.pop(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = val
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple:
lowerCamelCase__ : List[str] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : int = False
if "vqa" in checkpoint_url:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Any = 3129
lowerCamelCase__ : Tuple = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """vqa2-id2label.json"""
lowerCamelCase__ : str = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
        lowerCamelCase__ : Any = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase__ : Union[str, Any] = idalabel
lowerCamelCase__ : int = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Any = {0: """False""", 1: """True"""}
lowerCamelCase__ : int = {v: k for k, v in config.idalabel.items()}
lowerCamelCase__ : Any = 3
lowerCamelCase__ : List[str] = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Optional[int] = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Optional[Any] = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : Dict = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" )["""state_dict"""]
lowerCamelCase__ : List[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
lowerCamelCase__ : List[str] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
lowerCamelCase__ : Optional[int] = ViltImageProcessor(size=384 )
lowerCamelCase__ : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCamelCase__ : Union[str, Any] = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase__ : int = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase ).raw )
lowerCamelCase__ : int = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase ).raw )
lowerCamelCase__ : Dict = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
lowerCamelCase__ : Optional[int] = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Dict = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase__ : str = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=UpperCamelCase ).raw )
if mlm_model:
lowerCamelCase__ : str = """a bunch of [MASK] laying on a [MASK]."""
else:
lowerCamelCase__ : Optional[int] = """How many cats are there?"""
lowerCamelCase__ : List[str] = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
lowerCamelCase__ : Tuple = torch.Size([1, 11, 30522] )
lowerCamelCase__ : int = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCamelCase__ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase__ : str = torch.Size([1, 3129] )
lowerCamelCase__ : Any = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCamelCase__ : Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase__ : str = torch.Size([1, 2] )
lowerCamelCase__ : Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_A : Tuple =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 41
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = None
if token is not None:
__magic_name__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
__magic_name__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__magic_name__ = requests.get(A_, headers=A_ ).json()
__magic_name__ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
__magic_name__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(A_ ):
__magic_name__ = requests.get(url + f'''&page={i + 2}''', headers=A_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
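# Intended return value: a mapping {job name: job html_url}, e.g. (illustrative only)
#   {"PyTorch 1.11 / Model tests (models/albert, single-gpu)":
#    "https://github.com/huggingface/transformers/actions/runs/.../jobs/..."}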
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = None
if token is not None:
__magic_name__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    __magic_name__ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
__magic_name__ = requests.get(A_, headers=A_ ).json()
__magic_name__ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
__magic_name__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(A_ ):
__magic_name__ = requests.get(url + f'''&page={i + 2}''', headers=A_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = None
if token is not None:
__magic_name__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
__magic_name__ = requests.get(A_, headers=A_, allow_redirects=A_ )
__magic_name__ = result.headers["""Location"""]
__magic_name__ = requests.get(A_, allow_redirects=A_ )
__magic_name__ = os.path.join(A_, f'''{artifact_name}.zip''' )
with open(A_, """wb""" ) as fp:
fp.write(response.content )
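# Note: the first request above is intended to be issued with allow_redirects=False so
# that only the signed download URL is read from the "Location" header; the follow-up
# request then fetches the zip from that URL without the Authorization header, which is
# what GitHub's artifact download API expects.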
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = []
__magic_name__ = []
__magic_name__ = None
with zipfile.ZipFile(A_ ) as z:
for filename in z.namelist():
if not os.path.isdir(A_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A_ ) as f:
for line in f:
__magic_name__ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__magic_name__ = line[: line.index(""": """ )]
__magic_name__ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
__magic_name__ = line[len("""FAILED """ ) :]
failed_tests.append(A_ )
elif filename == "job_name.txt":
__magic_name__ = line
if len(A_ ) != len(A_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(A_ )} for `errors` '''
f'''and {len(A_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
__magic_name__ = None
if job_name and job_links:
__magic_name__ = job_links.get(A_, A_ )
# A list with elements of the form (line of error, error, failed test)
__magic_name__ = [x + [y] + [job_link] for x, y in zip(A_, A_ )]
return result
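# Each row of the intended result has the form [error_line, error, failed_test, job_link],
# built by zipping the parallel `errors` and `failed_tests` lists collected above.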
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = []
    __magic_name__ = [os.path.join(A_, p ) for p in os.listdir(A_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A_, job_links=A_ ) )
return errors
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = Counter()
counter.update([x[1] for x in logs] )
__magic_name__ = counter.most_common()
__magic_name__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__magic_name__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    __magic_name__ = dict(sorted(r.items(), key=lambda item : item[1]["count"], reverse=A_ ) )
return r
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
__magic_name__ = test.split("""/""" )[2]
else:
__magic_name__ = None
return test
def a__ ( A_, A_=None ):
'''simple docstring'''
__magic_name__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
__magic_name__ = [x for x in logs if x[2] is not None]
__magic_name__ = {x[2] for x in logs}
__magic_name__ = {}
for test in tests:
__magic_name__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__magic_name__ = counter.most_common()
__magic_name__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__magic_name__ = sum(error_counts.values() )
if n_errors > 0:
__magic_name__ = {"""count""": n_errors, """errors""": error_counts}
    __magic_name__ = dict(sorted(r.items(), key=lambda item : item[1]["count"], reverse=A_ ) )
return r
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = """| no. | error | status |"""
__magic_name__ = """|-:|:-|:-|"""
__magic_name__ = [header, sep]
for error in reduced_by_error:
__magic_name__ = reduced_by_error[error]["""count"""]
__magic_name__ = f'''| {count} | {error[:100]} | |'''
lines.append(A_ )
return "\n".join(A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = """| model | no. of errors | major error | count |"""
__magic_name__ = """|-:|-:|-:|-:|"""
__magic_name__ = [header, sep]
for model in reduced_by_model:
__magic_name__ = reduced_by_model[model]["""count"""]
__magic_name__ , __magic_name__ = list(reduced_by_model[model]["""errors"""].items() )[0]
__magic_name__ = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(A_ )
return "\n".join(A_ )
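# Sample rendered table (illustrative values only):
# | model | no. of errors | major error | count |
# |-:|-:|-:|-:|
# | bert | 3 | AssertionError: ... | 2 |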
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__lowerCAmelCase : List[Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__lowerCAmelCase : List[Any] = get_job_links(args.workflow_run_id, token=args.token)
__lowerCAmelCase : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__lowerCAmelCase : List[Any] = k.find(' / ')
__lowerCAmelCase : Optional[Any] = k[index + len(' / ') :]
__lowerCAmelCase : Optional[Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__lowerCAmelCase : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__lowerCAmelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__lowerCAmelCase : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__lowerCAmelCase : List[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__lowerCAmelCase : int = reduce_by_error(errors)
__lowerCAmelCase : Union[str, Any] = reduce_by_model(errors)
__lowerCAmelCase : Dict = make_github_table(reduced_by_error)
__lowerCAmelCase : Any = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 88
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A : Union[str, Any] ={
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] =[
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] =logging.get_logger(__name__)
_A : Dict =['''model.decoder.embed_positions.weights''']
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
if "emb" in name:
lowerCamelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowerCamelCase__ : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowerCamelCase__ : List[str] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowerCamelCase__ : Optional[int] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowerCamelCase__ : Dict = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowerCamelCase__ : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple[Dict, Dict]:
lowerCamelCase__ : int = list(state_dict.keys() )
lowerCamelCase__ : Tuple = {}
for key in keys:
lowerCamelCase__ : Any = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = rename_keys(UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase__ : Union[str, Any] = val[:hidden_size, :]
lowerCamelCase__ : Any = val[hidden_size : 2 * hidden_size, :]
lowerCamelCase__ : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase__ : str = val
else:
lowerCamelCase__ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
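# Illustrative shape note: each fused `in_proj_weight` has shape
# (3 * hidden_size, hidden_size); the three slices above are intended to become the
# separate q/k/v projection weights, while the enc_to_dec_proj tensors are collected
# into their own state dict for the composite model.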
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
lowerCamelCase__ : int = 1024
lowerCamelCase__ : int = 24
lowerCamelCase__ : List[Any] = 16
elif checkpoint == "medium":
lowerCamelCase__ : Any = 1536
lowerCamelCase__ : Union[str, Any] = 48
lowerCamelCase__ : Optional[int] = 24
elif checkpoint == "large":
lowerCamelCase__ : Optional[Any] = 2048
lowerCamelCase__ : Dict = 48
lowerCamelCase__ : List[Any] = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowerCamelCase__ : Any = MusicgenDecoderConfig(
hidden_size=UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , )
return config
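# Mapping implemented above, as (hidden_size, num_hidden_layers, num_attention_heads):
#   small -> (1024, 24, 16), medium -> (1536, 48, 24), large -> (2048, 48, 32)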
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MusicGen.get_pretrained(UpperCamelCase , device=UpperCamelCase )
lowerCamelCase__ : List[Any] = decoder_config_from_checkpoint(UpperCamelCase )
lowerCamelCase__ : Any = fairseq_model.lm.state_dict()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = rename_state_dict(
UpperCamelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase__ : str = TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase__ : Optional[int] = MusicgenForCausalLM(UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase__ , lowerCamelCase__ : List[str] = decoder.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowerCamelCase__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase , audio_encoder=UpperCamelCase , decoder=UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase )
# check we can do a forward pass
lowerCamelCase__ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase__ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase__ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
# set the appropriate bos/pad token ids
lowerCamelCase__ : Union[str, Any] = 2048
lowerCamelCase__ : List[str] = 2048
# set other default generation config params
lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCamelCase )
processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_A : List[str] =parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 41
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ )
__lowerCamelCase = Sql(
cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , )
# Build dataset for splits
__lowerCamelCase = self.builder.as_dataset(
split='train' , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
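# Usage sketch (hypothetical table and URI, for illustration only): the public entry
# point backed by this reader is `datasets.Dataset.from_sql`, e.g.
#   ds = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///my.db")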
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__lowerCamelCase = dataset
__lowerCamelCase = name
__lowerCamelCase = con
__lowerCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowerCamelCase = num_proc
__lowerCamelCase = to_sql_kwargs
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.to_sql_kwargs.pop('sql' , lowerCamelCase__ )
__lowerCamelCase = self.to_sql_kwargs.pop('con' , lowerCamelCase__ )
__lowerCamelCase = self.to_sql_kwargs.pop('index' , lowerCamelCase__ )
__lowerCamelCase = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs )
return written
def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = args
__lowerCamelCase = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
__lowerCamelCase = query_table(
table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowerCamelCase = batch.to_pandas()
__lowerCamelCase = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ )
return num_rows or len(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__lowerCamelCase , __lowerCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
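# Usage sketch (hypothetical table name, for illustration only): the writer above backs
# `datasets.Dataset.to_sql`, e.g.
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   Dataset.from_dict({"a": [1, 2]}).to_sql("my_table", con)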
| 90
|
'''simple docstring'''
_A : Union[str, Any] =range(2, 20 + 1)
_A : List[str] =[10**k for k in range(ks[-1] + 1)]
_A : dict[int, dict[int, list[list[int]]]] ={}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) )
lowerCamelCase__ : int = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) )
lowerCamelCase__ , lowerCamelCase__ : Dict = 0, 0
lowerCamelCase__ : List[str] = n - i
lowerCamelCase__ : Optional[Any] = memo.get(UpperCamelCase )
if sub_memo is not None:
lowerCamelCase__ : str = sub_memo.get(UpperCamelCase )
if jumps is not None and len(UpperCamelCase ) > 0:
# find and make the largest jump without going over
lowerCamelCase__ : Optional[Any] = -1
for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCamelCase__ : Dict = _k
break
if max_jump >= 0:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCamelCase__ : Dict = diff + c
for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = divmod(UpperCamelCase , 10 )
if new_c > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
lowerCamelCase__ : Any = []
else:
lowerCamelCase__ : str = {c: []}
lowerCamelCase__ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCamelCase__ , lowerCamelCase__ : Dict = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
lowerCamelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCamelCase__ : List[Any] = 0
while j < len(UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCamelCase__ : Optional[Any] = i
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 0, 0
for j in range(len(UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCamelCase__ : Optional[int] = ds_c + ds_b
diff += addend
lowerCamelCase__ : int = 0
for j in range(UpperCamelCase ):
lowerCamelCase__ : str = a_i[j] + addend
lowerCamelCase__ , lowerCamelCase__ : int = divmod(UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return diff, i - start_i
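# Note: digits are stored little-endian (index 0 is the ones digit), so the running
# value of the sequence term is sum(a_i[j] * 10**j); add() below performs the in-place
# carry propagation on that list.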
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
lowerCamelCase__ : List[Any] = digits[j] + addend
if s >= 10:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = divmod(UpperCamelCase , 10 )
lowerCamelCase__ : Any = addend // 10 + quotient
else:
lowerCamelCase__ : Any = s
lowerCamelCase__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowerCamelCase__ , lowerCamelCase__ : Any = divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 10**15 ) -> int:
lowerCamelCase__ : Any = [1]
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Tuple = 0
while True:
lowerCamelCase__ , lowerCamelCase__ : Any = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
lowerCamelCase__ : Union[str, Any] = 0
for j in range(len(UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 0
|
"""simple docstring"""
def _A (__a = 50 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
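# Intended reference check (Project Euler 116): a row of length 5 admits 7 red (2-unit),
# 3 green (3-unit) and 2 blue (4-unit) tilings, so solution(5) should return 7 + 3 + 2 = 12.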
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
return abs(UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase )
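# Intended behaviour (recursive Euclidean algorithm), e.g.:
#   greatest_common_divisor(24, 40) -> 8 and greatest_common_divisor(0, 7) -> 7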
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
while y: # --> when y=0 then loop will terminate and return x as final GCD.
lowerCamelCase__ , lowerCamelCase__ : Tuple = y, x % y
return abs(UpperCamelCase )
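# Intended behaviour, e.g. gcd_by_iterative(120, 84) -> 12; the final abs() keeps the
# result non-negative for negative inputs.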
def SCREAMING_SNAKE_CASE_ () -> Tuple:
try:
lowerCamelCase__ : Dict = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
lowerCamelCase__ : Any = int(nums[0] )
lowerCamelCase__ : Optional[Any] = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(UpperCamelCase , UpperCamelCase )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase , UpperCamelCase )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 41
| 0
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class a__ :
def __init__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCAmelCase = len(_A ) - 1
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCAmelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_A ) , 5 ) == 1
return output_values
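    # Note: output_values[i] is the Bernstein basis polynomial
    #   b(i, n)(t) = comb(n, i) * (1 - t) ** (n - i) * t ** i,  with n = self.degree;
    # by the binomial theorem these n + 1 values always sum to 1, which the assert checks.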
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCAmelCase = self.basis_function(_A )
__lowerCAmelCase = 0.0
__lowerCAmelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __SCREAMING_SNAKE_CASE( self , _A = 0.01 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowerCAmelCase = [] # x coordinates of points to plot
__lowerCAmelCase = [] # y coordinates of points to plot
__lowerCAmelCase = 0.0
while t <= 1:
__lowerCAmelCase = self.bezier_curve_function(_A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowerCAmelCase = [i[0] for i in self.list_of_points]
__lowerCAmelCase = [i[1] for i in self.list_of_points]
plt.plot(
_A , _A , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(_A , _A , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 92
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[int] = min_resolution
lowerCamelCase__ : Optional[Any] = max_resolution
lowerCamelCase__ : Union[str, Any] = do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20}
lowerCamelCase__ : Dict = do_thumbnail
lowerCamelCase__ : Optional[int] = do_align_axis
lowerCamelCase__ : Any = do_pad
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase ( _lowercase , unittest.TestCase ):
a = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def lowerCamelCase_ ( self: List[str] ):
pass
@is_flaky()
def lowerCamelCase_ ( self: Union[str, Any] ):
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Optional[int] ):
# Initialize image_processing
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Dict ):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 41
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = AltDiffusionPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase_ : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowercase_ : Dict = CLIPTextModel(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ : Union[str, Any] = 77
lowercase_ : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowercase_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : List[Any] = self.get_dummy_components()
torch.manual_seed(0 )
lowercase_ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ : Union[str, Any] = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = text_encoder
lowercase_ : int = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = '''A photo of an astronaut'''
lowercase_ : Optional[int] = alt_pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : List[str] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : int = self.get_dummy_components()
lowercase_ : Optional[int] = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ : str = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = text_encoder
lowercase_ : List[str] = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ : Any = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Optional[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = '''A painting of a squirrel eating a burger'''
lowercase_ : int = torch.manual_seed(0 )
lowercase_ : Optional[Any] = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowercase_ : Tuple = output.images
lowercase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase_ : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowercase_ : Optional[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowercase_ : Optional[int] = torch.manual_seed(0 )
lowercase_ : Optional[Any] = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' )
lowercase_ : Tuple = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase_ : str = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_A : Dict ={'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case : Tuple = logging.get_logger(__name__)
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
a :List[Any] = question_encoder
a :Any = generator
a :List[str] = self.question_encoder
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
if os.path.isfile(_lowerCamelCase ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
a :Tuple = os.path.join(_lowerCamelCase , '''question_encoder_tokenizer''' )
a :List[str] = os.path.join(_lowerCamelCase , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(_lowerCamelCase )
self.generator.save_pretrained(_lowerCamelCase )
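    # Resulting on-disk layout from the two calls above:
    #   <save_directory>/question_encoder_tokenizer/...
    #   <save_directory>/generator_tokenizer/...
    # from_pretrained() below reads the same two subfolders back.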
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , **_lowerCamelCase ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
a :Optional[int] = kwargs.pop('''config''' , _lowerCamelCase )
if config is None:
a :Any = RagConfig.from_pretrained(_lowerCamelCase )
a :List[Any] = AutoTokenizer.from_pretrained(
_lowerCamelCase , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
a :str = AutoTokenizer.from_pretrained(
_lowerCamelCase , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=_lowerCamelCase , generator=_lowerCamelCase )
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.current_tokenizer(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.generator.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.generator.decode(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.question_encoder
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.generator
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "longest" , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _lowerCamelCase , )
if max_length is None:
a :Any = self.current_tokenizer.model_max_length
a :Dict = self(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , max_length=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
a :Union[str, Any] = self.current_tokenizer.model_max_length
a :Union[str, Any] = self(
text_target=_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase , )
a :Optional[int] = labels['''input_ids''']
return model_inputs
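# A minimal usage sketch (illustrative only; the checkpoint names below are
# assumptions, not part of this file):
#
#   from transformers import AutoTokenizer
#   q_tok = AutoTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   g_tok = AutoTokenizer.from_pretrained("facebook/bart-large")
#   rag_tok = RagTokenizer(question_encoder=q_tok, generator=g_tok)
#   rag_tok.save_pretrained("./rag_tokenizer")  # writes one subfolder per tokenizer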
| 94
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester:
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 41
| 0
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set the module's (filtered) attributes directly on this object."""
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split("." )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
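# A minimal usage sketch (illustrative only): temporarily swap out os.path.join as
# seen from some module `mod` that did `import os`, then restore it automatically.
#
#   def fake_join(*args):
#       return "/patched/" + "/".join(args)
#
#   with patch_submodule(mod, "os.path.join", fake_join):
#       ...  # code in `mod` now resolves os.path.join to fake_join
#   # on exit, the original os module object is restored on `mod`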
| 95
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_A : Union[str, Any] =False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase ):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase ):
    def test_inference_image_variations( self ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 41
| 0
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    args = Namespace(**checkpoint['cfg']['model'])
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    state_dict = {key.replace('decoder', 'model'): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='gelu', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 96
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 97
|
'''simple docstring'''
def solution(n: int = 1000 ) -> int:
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
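# For example, solution(12) == 60: the only integer right triangle with perimeter 12
# is (3, 4, 5), and 3 * 4 * 5 = 60.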
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict=13 , UpperCamelCase__: int=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Tuple=10 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: str=0.6 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : List[str] = mask_ratio
lowerCamelCase__ : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Any ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel(config=UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
lowerCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : Union[str, Any] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase__ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = config_and_inputs
lowerCamelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = TFViTMAEModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : str = outputs_dict[0].numpy()
lowerCamelCase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCamelCase_ ( self: Dict ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__: int ):
lowerCamelCase__ : Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = v.numpy()
else:
lowerCamelCase__ : Union[str, Any] = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = prepare_numpy_arrays(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : Any = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: str ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
lowerCamelCase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ : List[str] = main_layer_class(UpperCamelCase__ )
lowerCamelCase__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ : List[str] = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
lowerCamelCase__ : int = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : Any = outputs.last_hidden_state.numpy()
lowerCamelCase__ : List[str] = 0
else:
lowerCamelCase__ : int = outputs.logits.numpy()
lowerCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : Dict = model_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : str = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ : Optional[Any] = 0
else:
lowerCamelCase__ : Union[str, Any] = after_outputs["""logits"""].numpy()
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
def lowerCamelCase_ ( self: Any ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
lowerCamelCase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ : int = model_class.from_config(model.config )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
    @slow
    def test_inference_for_pretraining( self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
| 41
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class A__ ( DiffusionPipeline ):
    """Unconditional audio generation pipeline (UNet + scheduler)."""
    def __init__( self , unet , scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 100 , generator = None , audio_length_in_s = None , return_dict = True , ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                F' {3 * down_scale_factor / self.unet.config.sample_rate}.')
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                ' process.')
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list) and len(generator) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio , t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output , t , audio).prev_sample
        audio = audio.clamp(-1 , 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
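# A minimal usage sketch (illustrative only; the checkpoint id below is an
# assumption, not part of this file):
#
#   pipe = A__.from_pretrained("harmonai/maestro-150k").to("cuda")
#   out = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = out.audios[0]  # numpy array of shape (channels, samples)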
| 99
|
'''simple docstring'''
class PrefixSum:
    def __init__( self , array: list[int] ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start: int , end: int ):
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum: int ):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
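# Example (illustrative): for array [1, 2, 3] the prefix sums are [1, 3, 6], so
#   PrefixSum([1, 2, 3]).get_sum(1, 2) == 5       # 2 + 3
#   PrefixSum([1, 2, 3]).contains_sum(5) is True  # the subarray [2, 3] sums to 5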
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
| 0
|
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int ) -> int:
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
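# Examples (illustrative): twin_prime(3) == 5 since 3 and 5 are both prime,
# while twin_prime(4) == -1 because 4 is not prime.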
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swinv2'''] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
_A : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowercase__ :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
|
'''simple docstring'''
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex: str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, '''G''')
    g.breath_first_search()
    print(g.shortest_path('''D'''))
    print(g.shortest_path('''G'''))
    try:
        print(g.shortest_path('''Foo'''))
    except ValueError as error:
        # no path exists from G to Foo, so shortest_path raises
        print(error)
| 41
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowercase ( _snake_case : str ) ->Tuple:
"""simple docstring"""
__snake_case : Tuple = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def lowercase ( _snake_case : Tuple ) ->Tuple:
"""simple docstring"""
__snake_case , __snake_case : str = emb.weight.shape
__snake_case : int = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def lowercase ( _snake_case : List[Any] , _snake_case : Tuple=None ) ->Dict:
"""simple docstring"""
__snake_case : Optional[Any] = {}
for old_key in state_dict.keys():
__snake_case : List[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Optional[int] = key.replace('''moe_layer.experts.0''' , f"""ffn.experts.expert_{expert_idx}""" )
else:
__snake_case : Tuple = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
__snake_case : Union[str, Any] = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
__snake_case : List[Any] = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
__snake_case : Optional[int] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
__snake_case : int = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
__snake_case : List[str] = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
__snake_case : Union[str, Any] = state_dict[old_key]
return new_dict
def lowercase ( _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : int , _snake_case : str = WEIGHTS_NAME ) ->Tuple:
"""simple docstring"""
__snake_case : int = []
__snake_case : Any = 0
os.makedirs(_snake_case , exist_ok=_snake_case )
for expert in range(_snake_case ):
__snake_case : Tuple = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_snake_case ):
__snake_case : Dict = torch.load(_snake_case )['''model''']
remove_ignore_keys_(_snake_case )
__snake_case : str = rename_fairseq_keys(_snake_case , _snake_case )
__snake_case : Union[str, Any] = os.path.join(
_snake_case , weights_name.replace('''.bin''' , f"""-{len(_snake_case )+1:05d}-of-???.bin""" ) )
torch.save(_snake_case , _snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_snake_case )[0]].dtype )
# Add the last block
__snake_case : Dict = os.path.join(_snake_case , weights_name.replace('''.bin''' , f"""-{len(_snake_case )+1:05d}-of-???.bin""" ) )
__snake_case : Optional[Any] = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(_snake_case )
__snake_case : str = rename_fairseq_keys(_snake_case , _snake_case )
__snake_case : Any = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_snake_case ) == 1:
__snake_case : List[str] = os.path.join(_snake_case , _snake_case )
torch.save(_snake_case , _snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_snake_case , _snake_case )
# Otherwise, let's build the index
__snake_case : Union[str, Any] = {}
for idx, shard in enumerate(_snake_case ):
__snake_case : Dict = weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-{len(_snake_case ):05d}.bin""" )
__snake_case : List[Any] = os.path.join(_snake_case , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_snake_case , os.path.join(_snake_case , _snake_case ) )
for key in shard:
__snake_case : List[str] = shard_file
# Add the metadata
__snake_case : Dict = {'''total_size''': total_size}
__snake_case : Dict = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
__snake_case : Optional[Any] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case ) + '''\n'''
f.write(_snake_case )
return metadata, index
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
SCREAMING_SNAKE_CASE : Any = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
SCREAMING_SNAKE_CASE : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 102
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float] , x: float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
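# Horner's rule needs only n multiplications for a degree-n polynomial. For
# poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, both functions return
# 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0.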
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 41
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_directory , '''words.txt''' )
    words = ""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
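# For example, "SKY" has word value 19 + 11 + 25 = 55 = 10 * 11 / 2, the 10th
# triangular number, so it counts as a triangular word.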
if __name__ == "__main__":
print(solution())
| 103
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Any ='''tf'''
else:
_A : List[str] ='''jax'''
class _lowercase ( _lowercase , unittest.TestCase ):
a = ByTaTokenizer
a = False
def lowerCamelCase_ ( self: str ):
super().setUp()
lowerCamelCase__ : str = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Any=False , UpperCamelCase__: Union[str, Any]=20 , UpperCamelCase__: Optional[int]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
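# For example (added note): bytes([0x80]).decode("utf-8") raises
# UnicodeDecodeError, because 0x80 is a continuation byte that cannot
# start a UTF-8 sequence on its own.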
lowerCamelCase__ : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
try:
lowerCamelCase__ : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) )
lowerCamelCase__ : Tuple = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowerCamelCase__ : Dict = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowerCamelCase__ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowerCamelCase__ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowerCamelCase__ : str = """ """ + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : Union[str, Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : Optional[int] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = """Unicode €."""
lowerCamelCase__ : List[Any] = tokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
lowerCamelCase__ : List[Any] = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
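# Note (added for clarity): the expected ids above are simply UTF-8 byte values
# offset by the 3 special tokens (pad=0, eos=1, unk=2). E.g. "U" is byte 85 ->
# id 88, and "€" encodes to bytes 0xE2 0x82 0xAC (226, 130, 172) -> ids
# 229, 133, 175; the trailing 1 is </s>.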
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCamelCase__ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : int = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )
def lowerCamelCase_ ( self: Optional[int] ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : int = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCamelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
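# (added note) id 255 corresponds to raw byte 252 (ids are offset by 3 special
# tokens); 0xFC is not valid UTF-8 on its own, so decoding appears to drop it,
# which is why the empty string is expected here.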
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
# The default common tokenizer tests use invalid tokens for ByT5, which can only accept one-character strings
# and special added tokens as tokens
lowerCamelCase__ : Dict = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
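# Worked example (added for illustration): for input 6 the proper divisors are
# 1, 2 and 3, so the function returns 1 + 2 + 3 = 6 -- i.e. 6 is a perfect number.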
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = grid.shape
lowerCamelCase__ : List[str] = [-1, 1, 0, 0]
lowerCamelCase__ : Dict = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ , lowerCamelCase__ : Any = [(0, source)], set()
lowerCamelCase__ : Tuple = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[int] = np.empty((rows, cols) , dtype=UpperCamelCase )
lowerCamelCase__ : str = None
while queue:
((lowerCamelCase__) , (lowerCamelCase__)) : List[str] = heappop(UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Optional[int] = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = predecessors[x, y]
path.append(UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCamelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Any = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Union[str, Any] = dist + 1
lowerCamelCase__ : List[str] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
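# Hedged usage sketch (added; `dijkstra` is an assumed public name for the
# obfuscated function above, which takes (grid, source, destination,
# allow_diagonal) and returns (distance, path)):
# import numpy as np
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])  # 1 = walkable, 0 = blocked
# dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
# # dist == 6, path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]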
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "" ) ->dict[str, float]:
'''simple docstring'''
a : List[str] = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
a : Dict = BeautifulSoup(requests.get(_lowercase ).text , "html.parser" )
a : Union[str, Any] = soup.find_all("td" , attrs="titleColumn" )
a : int = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowercase , _lowercase )
}
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "IMDb_Top_250_Movies.csv" ) ->None:
'''simple docstring'''
a : Optional[int] = get_imdb_top_aaa_movies()
with open(_lowercase , "w" , newline="" ) as out_file:
a : int = csv.writer(_lowercase )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
'''simple docstring'''
from __future__ import annotations
import requests
_A : str =set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = "new" , UpperCamelCase = None ) -> dict:
lowerCamelCase__ : Any = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(UpperCamelCase ) - valid_terms ) ):
lowerCamelCase__ : str = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(UpperCamelCase )
lowerCamelCase__ : str = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 429:
raise requests.HTTPError
lowerCamelCase__ : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(UpperCamelCase )}
lowerCamelCase__ : Dict = {}
for id_ in range(UpperCamelCase ):
lowerCamelCase__ : Union[str, Any] = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, it means you are rate limited. Try again after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowerCAmelCase__ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
self.assertTrue(isinstance(dc.token_ids ,lowercase_ ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : Tuple ):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCAmelCase__ : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(lowercase_ ) # fails here
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : str = [[1, 2, 3], [1, 2, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
lowerCAmelCase__ : int = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
lowerCAmelCase__ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = dc.update(3 )
lowerCAmelCase__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCAmelCase__ : List[Any] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
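# Recap (added note): dc.update(token_id) returns a (stepped, completed, reset)
# triple -- stepped when the token advanced some branch, completed once one full
# option such as [1, 2, 3] has been generated, and reset when a mismatching
# token forces the constraint to start over.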
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_A : Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowerCamelCase__ : Optional[int] = value
else:
lowerCamelCase__ : Any = value
return new_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Dict:
lowerCamelCase__ : Optional[int] = """"""
if is_panoptic:
lowerCamelCase__ : Dict = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase__ : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
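# (added note) d_model is 256 for this architecture, so rows [:256] of the fused
# in_proj weight form the query projection, rows [256:512] the key projection,
# and the final 256 rows the value projection.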
lowerCamelCase__ : int = in_proj_weight[:256, :]
lowerCamelCase__ : Any = in_proj_bias[:256]
lowerCamelCase__ : str = in_proj_weight[256:512, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[256:512]
lowerCamelCase__ : Dict = in_proj_weight[-256:, :]
lowerCamelCase__ : str = in_proj_bias[-256:]
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase__ : Any = """resnet101"""
if "dc5" in model_name:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : int = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase__ : List[str] = 250
else:
lowerCamelCase__ : int = 91
lowerCamelCase__ : int = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """coco-detection-id2label.json"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase )
# prepare image
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval()
lowerCamelCase__ : Dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Tuple = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_A : Optional[Any] =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
__lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
SCREAMING_SNAKE_CASE_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
if self.train_file is not None:
a = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
a = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
def __call__( self : Dict , __lowerCamelCase : int ) -> List[Any]:
a = "label" if "label" in features[0].keys() else "labels"
a = [feature.pop(__lowerCamelCase ) for feature in features]
a = len(__lowerCamelCase )
a = len(features[0]["input_ids"] )
a = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase )] for feature in features
]
a = list(chain(*__lowerCamelCase ) )
a = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
a = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
a = torch.tensor(__lowerCamelCase , dtype=torch.intaa )
return batch
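# (added note) the collator flattens num_choices feature dicts per example so
# tokenizer.pad sees one flat batch, then views each padded tensor back to
# (batch_size, num_choices, seq_len) and re-attaches the popped labels.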
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", A, A )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a = training_args.get_process_log_level()
logger.setLevel(A )
datasets.utils.logging.set_verbosity(A )
transformers.utils.logging.set_verbosity(A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a = {}
if data_args.train_file is not None:
a = data_args.train_file
if data_args.validation_file is not None:
a = data_args.validation_file
a = data_args.train_file.split("." )[-1]
a = load_dataset(
A, data_files=A, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
a = load_dataset(
"swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=A, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a = [F"""ending{i}""" for i in range(4 )]
a = "sent1"
a = "sent2"
if data_args.max_seq_length is None:
a = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
a = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
a = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(A : Dict ):
a = [[context] * 4 for context in examples[context_name]]
a = examples[question_header_name]
a = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(A )
]
# Flatten out
a = list(chain(*A ) )
a = list(chain(*A ) )
# Tokenize
a = tokenizer(
A, A, truncation=A, max_length=A, padding="max_length" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(A ), 4 )] for k, v in tokenized_examples.items()}
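# (added note) each example yields 4 (context, candidate-ending) pairs, so the
# flattened lists contain 4 * num_examples sequences; the comprehension above
# regroups every consecutive block of 4 back into one multiple-choice example.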
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
a = raw_datasets["train"]
if data_args.max_train_samples is not None:
a = min(len(A ), data_args.max_train_samples )
a = train_dataset.select(range(A ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
a = train_dataset.map(
A, batched=A, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
a = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
a = min(len(A ), data_args.max_eval_samples )
a = eval_dataset.select(range(A ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
a = eval_dataset.map(
A, batched=A, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=A, pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(A : Any ):
a , a = eval_predictions
a = np.argmax(A, axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
a = Trainer(
model=A, args=A, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=A, data_collator=A, compute_metrics=A, )
# Training
if training_args.do_train:
a = None
if training_args.resume_from_checkpoint is not None:
a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a = last_checkpoint
a = trainer.train(resume_from_checkpoint=A )
trainer.save_model() # Saves the tokenizer too for easy upload
a = train_result.metrics
a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A )
)
a = min(A, len(A ) )
trainer.log_metrics("train", A )
trainer.save_metrics("train", A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a = trainer.evaluate()
a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A )
a = min(A, len(A ) )
trainer.log_metrics("eval", A )
trainer.save_metrics("eval", A )
a = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**A )
else:
trainer.create_model_card(**A )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # create a list of random PIL images for testing
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
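
# Usage sketch (hypothetical values): RobertaConfig() yields the roberta-base defaults
# listed above; any field can be overridden via kwargs, e.g. RobertaConfig(num_hidden_layers=6).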
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
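
# Usage sketch (hypothetical): root = TrieNode(); root.insert_many(["band", "banana"]);
# root.find("band") is True while root.find("ban") is False (a prefix is not a stored word).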
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
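
# The nightly integration tests below exercise the full-size ONNX export on GPU (CUDA
# execution provider); the fast tests above use a tiny random checkpoint on CPU.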
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
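
# The original checkpoint stores each attention layer's query/key/value weights as a
# single fused qkv matrix; HuggingFace ViLT expects three separate projections, so the
# helper below slices the fused tensors apart.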
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
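
# End-to-end conversion: instantiate the right ViLT head for the checkpoint, remap the
# original weights into it, then verify the converted model on known image+text examples.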
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        # create a list of random PIL images for testing
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
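
    # Note: unlike single-tokenizer processors, InstructBlipProcessor carries a second
    # (Q-Former) tokenizer whose outputs are exposed under the "qformer_" key prefix above.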
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
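
# Note: with _LazyModule, the modeling submodules above are imported only on first
# attribute access, so importing the package stays cheap even when torch is installed.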
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
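
# For example, with revision=None and path "filename with blanks.csv", the helper yields
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# (quote() percent-encodes the blanks), matching the parametrized cases above.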
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
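
# Translate fairseq/audiocraft parameter names into the HuggingFace MusicGen decoder layout.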
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate query/key/value projections
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
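
# End-to-end conversion: load the fairseq checkpoint, remap its decoder weights, assemble
# the composite text-encoder/audio-encoder/decoder model, sanity-check a forward pass,
# then save and optionally push to the Hub.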
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
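
# Usage sketch (hypothetical): VivitConfig() reproduces the defaults above, which follow
# the vivit-b-16x2-kinetics400 architecture; pass kwargs such as num_frames=16 for variants.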
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # digit sums of the high part (b) and low part (c) of a_i = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)

    return diff, i - start_i


def add(digits, k, addend):
    # add `addend` into the digit array, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
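
# The memo keeps, per (digitsum(b), c) pair, cached jumps of the form (diff, terms, k),
# letting long stretches of the sequence be skipped rather than recomputed term by term.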
if __name__ == "__main__":
print(F'{solution() = }')
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements are equal (an empty list also passes)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
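
# For the default n=4, run() searches for four consecutive integers that each have four
# distinct prime factors (Project Euler problem 47; the published answer is 134043).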
if __name__ == "__main__":
print(solution())
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate in the nbest_predictions.json file."},
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
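
# Minimal usage sketch (illustrative; the checkpoint, paths and batch size below
# are hypothetical, not from the original file):
#
#   from transformers import AutoTokenizer
#   from torch.utils.data import DataLoader
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode=Split.dev)
#   loader = DataLoader(dataset, batch_size=8)  # each item is the `inputs` dict built above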
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
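
# Illustrative note: with this lazy-module pattern, importing the package is cheap;
# the real `tokenization_herbert` module is only loaded the first time an attribute
# such as `HerbertTokenizer` is accessed on the package object.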
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
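
# Example invocation (script name and paths are hypothetical):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers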
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def optimal_merge_pattern(files: list) -> float:
    """Find the minimum total cost of merging all the sorted files together."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
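    # Quick sanity check (illustrative, not part of the original file): merging
    # [2, 3, 4] first combines 2+3 (cost 5), then 5+4 (cost 9), so the total is 14.
    print(optimal_merge_pattern([2, 3, 4]))  # expected: 14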
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle a list in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
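
# Note: the routine above swaps two uniformly random positions per step, which is a
# simple random-swap shuffle. The textbook Fisher-Yates algorithm walks the list from
# the end and draws only from the not-yet-fixed prefix, which guarantees a uniform
# permutation. A minimal sketch of that variant (function name is ours, not from the
# original file):
def fisher_yates_shuffle_classic(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick an index from the still-unshuffled prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data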
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]

    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
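
# Quick reload check once saved or uploaded (a sketch; reuses the ids defined above):
#   model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   tok = FSMTTokenizer.from_pretrained(mname_tiny)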
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Depth First Search on a graph, returning the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
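    # The sample graph is connected, so starting from "A" the search reaches every
    # vertex: {"A", "B", "C", "D", "E", "F", "G"} (set print order may vary).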
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
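
# For the default n = 1000 the unique triplet is (a, b, c) = (200, 375, 425):
# 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 425**2, so
# solution() == 200 * 375 * 425 == 31_875_000.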
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
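
# Worked example with the 24 kHz defaults above: upsampling_ratios [8, 5, 4, 2]
# give hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24_000 / 320) = 75,
# and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.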
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
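# A minimal usage sketch of the class above (illustrative values only):
#
#   ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#   ps.get_sum(1, 3)               # 10 - 1 == 9, i.e. 2 + 3 + 4
#   ps.contains_sum(5)             # True: the slice [2, 3] sums to 5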
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    # we will verify our conversion on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
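# A typical invocation of the script above (the script filename is assumed from
# the usual transformers naming convention, and the dump path is a placeholder):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50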
'''simple docstring'''
from __future__ import annotations
_A : Any ={
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
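# Expected output of the demo above (BFS tree rooted at "G"):
#
#   G->C->A->B->D
#   G
#   ValueError: No path from vertex: G to vertex: Foo  (raised by the last call)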
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
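# Expected output of the demo above (push prepends, so pushing 5..1 prints 1..5):
#
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5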
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given its coefficients (lowest degree first)."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one pass, no powers)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
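# Worked example for the demo values above: with poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# and x = 10.0, both functions evaluate 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0.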
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab())  # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in the common tester
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
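# A short sketch of the byte-to-id mapping the expected values above rely on:
# ByT5 reserves ids 0-2 for pad/eos/unk, so each UTF-8 byte b maps to id b + 3
# (e.g. "U" is byte 85, hence id 88 in the "Unicode €." example).
#
#   [b + 3 for b in "hi".encode("utf-8")]  # [107, 108]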
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue

        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
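# A small usage sketch (hypothetical grid; cells equal to 1 are passable):
#
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0, path == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]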
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
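# Quick sanity check for the integration test above: ViT-base/16 at 224x224
# yields (224 // 16) ** 2 + 1 == 197 tokens (196 patches + [CLS]) and a
# 1000-way ImageNet head, hence the (1, 1000) logits shape.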
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = tempfile.mkdtemp()
# fmt: off
snake_case : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case : List[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case : List[Any] = {"""unk_token""": """<unk>"""}
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
snake_case : Union[str, Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
snake_case : Dict = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
        snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
snake_case : int = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
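    # np.moveaxis above flips the channel-first (3, 30, 400) arrays to the
    # channel-last (30, 400, 3) layout that PIL's Image.fromarray expects.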
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Dict = self.get_image_processor()
snake_case : Any = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
snake_case : Dict = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : int = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : str = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
snake_case : List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : int = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case : Tuple = self.prepare_image_inputs()
snake_case : Dict = image_processor(UpperCamelCase__ , return_tensors="np" )
snake_case : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.get_image_processor()
snake_case : Optional[int] = self.get_tokenizer()
snake_case : Dict = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case : Optional[Any] = """lower newer"""
snake_case : Optional[int] = processor(text=UpperCamelCase__ )
snake_case : Union[str, Any] = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case : List[str] = """lower newer"""
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Optional[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : List[Any] = self.prepare_image_inputs()
snake_case : Optional[int] = processor(images=UpperCamelCase__ , visual_prompt=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Dict = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : str = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : List[Any] = processor.batch_decode(UpperCamelCase__ )
snake_case : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
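# End-to-end usage sketch mirroring the processor behaviour tested above (the
# checkpoint name is an assumption; "CIDAS/clipseg-rd64-refined" hosts public
# CLIPSeg weights on the Hub):
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#     # or, for visual prompting: processor(images=image, visual_prompt=prompt_image)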
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict , old , new ) -> None:
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ) -> Dict:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
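# Example of the mapping performed above (illustrative key):
#   "backbone.0.body.layer1.0.conv1.weight" -> "backbone.conv_encoder.model.layer1.0.conv1.weight"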
def read_in_q_k_v(state_dict , is_panoptic=False ) -> None:
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
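# The slicing above undoes PyTorch's fused attention projection: in_proj_weight
# stacks the query, key and value projections row-wise as a (3 * 256, 256) matrix,
# so rows 0-255 become q_proj, rows 256-511 k_proj, and the last 256 rows v_proj.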
def prepare_img() -> List[Any]:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ) -> int:
lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase__ : Any = """resnet101"""
if "dc5" in model_name:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : int = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase__ : List[str] = 250
else:
lowerCamelCase__ : int = 91
lowerCamelCase__ : int = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """coco-detection-id2label.json"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase )
# prepare image
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval()
lowerCamelCase__ : Dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Tuple = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_a = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
_a = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
_a = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class A_ ( _lowercase ):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowercase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[Any] = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[Any]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
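    # Example of the mask produced above (hypothetical ids): with token_ids_0=[5, 6]
    # and token_ids_1=[7], the sequence is [CLS] 5 6 [SEP] 7 [SEP], so the method
    # returns [0, 0, 0, 0, 1, 1].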
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
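# Minimal usage sketch of the fast tokenizer defined above (hedged: in transformers
# this class is exposed as ElectraTokenizerFast; network access assumed):
#
#     tokenizer = A_.from_pretrained("google/electra-small-discriminator")
#     tokenizer("lower newer")  # -> input_ids, token_type_ids, attention_mask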
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Tuple = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: List[str] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: int , **UpperCamelCase__: Tuple ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: Any ):
        lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase__ : Tuple = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : Any = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[str] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = """lower newer"""
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Any = """lower newer"""
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : Tuple = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : List[str] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """lower newer"""
lowerCamelCase__ : str = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowercase_ = False
class __A ( unittest.TestCase ):
    '''simple docstring'''

    def get_model_optimizer(self , resolution=32 ) -> Optional[Any]:
        """simple docstring"""
        set_seed(0 )
        model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer

    @slow
    def test_training_step_equality(self ) -> Dict:
        """simple docstring"""
        device = """cpu"""  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
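# Minimal sketch of what add_noise computes for both schedulers above (assumption:
# they share the closed-form forward process x_t = sqrt(a_bar_t) * x_0 +
# sqrt(1 - a_bar_t) * eps; diffusers' own implementation additionally handles
# device/dtype alignment).
def _manual_add_noise(scheduler , clean , noise , t ):
    a_bar = scheduler.alphas_cumprod[t].reshape(-1 , 1 , 1 , 1 )
    return a_bar.sqrt() * clean + (1 - a_bar ).sqrt() * noise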
'''simple docstring'''
class TrieNode:
    def __init__( self ):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many( self , words: list[str] ):
        for word in words:
            self.insert(word )

    def insert( self , word: str ):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find( self , word: str ) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete( self , word: str ):
        def _delete(curr: TrieNode , word: str , index: int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr

        _delete(self , word , 0 )
def print_words(node: TrieNode , word: str ) -> None:
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
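def words_with_prefix(root: TrieNode , prefix: str ) -> list[str]:
    # Illustrative helper, not in the original module: walk down to the prefix node,
    # then reuse the same traversal idea as print_words to collect complete words.
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    results: list[str] = []

    def _collect(node: TrieNode , word: str ) -> None:
        if node.is_leaf:
            results.append(word )
        for key, child in node.nodes.items():
            _collect(child , word + key )

    _collect(curr , prefix )
    return results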
def test_trie() -> bool:
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def print_results(msg: str , passes: bool ) -> None:
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def test() -> None:
    assert test_trie()
def main() -> None:
    print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float , tangential_force: float , area: float , ) -> tuple[str, float]:
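    """
    Doctest-style usage sketch (values chosen for easy arithmetic; examples added
    here, not taken from the original module):
    >>> shear_stress(stress=25, tangential_force=100, area=0)
    ('area', 4.0)
    >>> shear_stress(stress=0, tangential_force=1600, area=200)
    ('stress', 8.0)
    >>> shear_stress(stress=1000, tangential_force=0, area=1200)
    ('tangential_force', 1200000)
    """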
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
def create_rename_keys(config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> Union[str, Any]:
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict , config ) -> None:
    for i in range(config.num_hidden_layers ):
        prefix = """vilt."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
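# Same fused-projection trick as in the encoder case above: the timm qkv matrix has
# shape (3 * hidden_size, hidden_size), and the three row blocks sliced here become
# the separate query, key and value projections.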
def remove_classification_head_(state_dict ) -> None:
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url , pytorch_dump_folder_path ) -> Tuple:
lowerCamelCase__ : List[str] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : int = False
if "vqa" in checkpoint_url:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Any = 3129
lowerCamelCase__ : Tuple = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """vqa2-id2label.json"""
lowerCamelCase__ : str = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Union[str, Any] = idalabel
lowerCamelCase__ : int = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Any = {0: """False""", 1: """True"""}
lowerCamelCase__ : int = {v: k for k, v in config.idalabel.items()}
lowerCamelCase__ : Any = 3
lowerCamelCase__ : List[str] = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Optional[int] = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Optional[Any] = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : Dict = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" )["""state_dict"""]
lowerCamelCase__ : List[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
lowerCamelCase__ : List[str] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
lowerCamelCase__ : Optional[int] = ViltImageProcessor(size=384 )
lowerCamelCase__ : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCamelCase__ : Union[str, Any] = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase__ : int = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase ).raw )
lowerCamelCase__ : int = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase ).raw )
lowerCamelCase__ : Dict = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
lowerCamelCase__ : Optional[int] = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Dict = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase__ : str = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=UpperCamelCase ).raw )
if mlm_model:
lowerCamelCase__ : str = """a bunch of [MASK] laying on a [MASK]."""
else:
lowerCamelCase__ : Optional[int] = """How many cats are there?"""
lowerCamelCase__ : List[str] = processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
lowerCamelCase__ : Tuple = torch.Size([1, 11, 30522] )
lowerCamelCase__ : int = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCamelCase__ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase__ : str = torch.Size([1, 3129] )
lowerCamelCase__ : Any = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowerCamelCase__ : Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase__ : str = torch.Size([1, 2] )
lowerCamelCase__ : Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
a_ = '''pytorch_model.bin'''
a_ = '''pytorch_model.bin.index.json'''
a_ = '''adapter_config.json'''
a_ = '''adapter_model.bin'''
a_ = '''adapter_model.safetensors'''
a_ = '''tf_model.h5'''
a_ = '''tf_model.h5.index.json'''
a_ = '''model.ckpt'''
a_ = '''flax_model.msgpack'''
a_ = '''flax_model.msgpack.index.json'''
a_ = '''model.safetensors'''
a_ = '''model.safetensors.index.json'''
a_ = '''config.json'''
a_ = '''preprocessor_config.json'''
a_ = FEATURE_EXTRACTOR_NAME
a_ = '''generation_config.json'''
a_ = '''modelcard.json'''
a_ = '''▁'''
a_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
a_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
a_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
a_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = f"""This example requires a minimum version of {min_version},"""
        error_message += f""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
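# Example: with transformers 4.20.0 installed, check_min_version("4.31.0") raises
# ImportError, while check_min_version("4.20.0") returns silently.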
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure ={
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_swiftformer'''] =[
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_A : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
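# For orientation, a simplified sketch of the lazy-import pattern used above
# (not the actual `_LazyModule` implementation): exported symbols resolve
# their submodule only on first attribute access. Stdlib only.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
    def __getattr__(self, attr):
        submodule = importlib.import_module(
            "." + self._symbol_to_module[attr], self.__name__
        )
        return getattr(submodule, attr)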
| 41
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__a = '''hf-internal-testing/tiny-random-bert'''
__a = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__a = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Dict ):
snake_case__ : Dict = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
snake_case__ : int = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
snake_case__ : Union[str, Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
snake_case__ : str = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""9b8c223""" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , """snapshots""" , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase ( self : List[Any] ):
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
snake_case__ : Tuple = cached_file("""tiny-random-bert""" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
snake_case__ : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="""aaaa""" )
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
snake_case__ : str = cached_file(UpperCamelCase__ , """conf""" )
def lowerCamelCase ( self : Optional[int] ):
with self.assertRaisesRegex(UpperCamelCase__ , """does not appear to have a file named""" ):
snake_case__ : Any = cached_file(UpperCamelCase__ , """conf""" )
with open(os.path.join(UpperCamelCase__ , """refs""" , """main""" ) ) as f:
snake_case__ : Optional[int] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , """.no_exist""" , UpperCamelCase__ , """conf""" ) ) )
snake_case__ : Optional[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case__ : Dict = cached_file(UpperCamelCase__ , """conf""" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case__ : List[Any] = mock.Mock()
snake_case__ : str = 500
snake_case__ : List[str] = {}
snake_case__ : Union[str, Any] = HTTPError
snake_case__ : List[str] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=UpperCamelCase__ ) as mock_head:
snake_case__ : List[Any] = cached_file(UpperCamelCase__ , """conf""" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : Dict ):
self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCamelCase__ ) )
def lowerCamelCase ( self : Optional[Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid model identifier""" ):
get_file_from_repo("""bert-base-case""" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , """is not a valid git identifier""" ):
get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ , revision="""ahaha""" )
snake_case__ : Tuple = get_file_from_repo("""bert-base-cased""" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case__ : str = json.loads(open(UpperCamelCase__ , """r""" ).read() )
self.assertEqual(config["""hidden_size"""] , 768 )
def lowerCamelCase ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : int = Path(UpperCamelCase__ ) / """a.txt"""
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , """a.txt""" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , """b.txt""" ) )
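# Standalone illustration of the 500-response mocking pattern used in the
# connection-error test above; `_demo_mocked_head` is a stand-in for the
# network call, not `cached_file` itself.
import unittest.mock as mock
import requests

def _demo_mocked_head():
    response_mock = mock.Mock(status_code=500, headers={}, json=lambda: {})
    with mock.patch("requests.Session.request", return_value=response_mock) as mocked:
        status = requests.Session().request("HEAD", "https://huggingface.co").status_code
        mocked.assert_called()
        return status  # 500, without any real network traffic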
| 35
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] =logging.get_logger(__name__)
_A : Dict =['''model.decoder.embed_positions.weights''']
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
if "emb" in name:
lowerCamelCase__ : Dict = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
lowerCamelCase__ : List[str] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
lowerCamelCase__ : List[str] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
lowerCamelCase__ : Optional[int] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
lowerCamelCase__ : Union[str, Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
lowerCamelCase__ : Dict = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
lowerCamelCase__ : Dict = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
lowerCamelCase__ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase__ : int = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Tuple[Dict, Dict]:
lowerCamelCase__ : int = list(state_dict.keys() )
lowerCamelCase__ : Tuple = {}
for key in keys:
lowerCamelCase__ : Any = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = rename_keys(UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase__ : Union[str, Any] = val[:hidden_size, :]
lowerCamelCase__ : Any = val[hidden_size : 2 * hidden_size, :]
lowerCamelCase__ : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase__ : str = val
else:
lowerCamelCase__ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
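# The fused-projection split above in miniature: one (3*H, H) weight is
# sliced into equal query/key/value blocks. Plain PyTorch, nothing assumed.
import torch as _torch

_h = 4  # toy hidden size
_fused = _torch.arange(3 * _h * _h, dtype=_torch.float32).reshape(3 * _h, _h)
_q, _k, _v = _fused[:_h, :], _fused[_h : 2 * _h, :], _fused[-_h:, :]
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _fused)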
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
lowerCamelCase__ : int = 1024
lowerCamelCase__ : int = 24
lowerCamelCase__ : List[Any] = 16
elif checkpoint == "medium":
lowerCamelCase__ : Any = 1536
lowerCamelCase__ : Union[str, Any] = 48
lowerCamelCase__ : Optional[int] = 24
elif checkpoint == "large":
lowerCamelCase__ : Optional[Any] = 2048
lowerCamelCase__ : Dict = 48
lowerCamelCase__ : List[Any] = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
lowerCamelCase__ : Any = MusicgenDecoderConfig(
hidden_size=UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase , num_attention_heads=UpperCamelCase , )
return config
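# The size table encoded by the branches above, gathered for reference
# (values copied from this file, not from an external source):
_MUSICGEN_SIZES = {
    "small": {"hidden_size": 1024, "num_hidden_layers": 24, "num_attention_heads": 16},
    "medium": {"hidden_size": 1536, "num_hidden_layers": 48, "num_attention_heads": 24},
    "large": {"hidden_size": 2048, "num_hidden_layers": 48, "num_attention_heads": 32},
}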
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="cpu" ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MusicGen.get_pretrained(UpperCamelCase , device=UpperCamelCase )
lowerCamelCase__ : List[Any] = decoder_config_from_checkpoint(UpperCamelCase )
lowerCamelCase__ : Any = fairseq_model.lm.state_dict()
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = rename_state_dict(
UpperCamelCase , hidden_size=decoder_config.hidden_size )
lowerCamelCase__ : str = TaEncoderModel.from_pretrained("""t5-base""" )
lowerCamelCase__ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
lowerCamelCase__ : Optional[int] = MusicgenForCausalLM(UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase__ , lowerCamelCase__ : List[str] = decoder.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
lowerCamelCase__ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase , audio_encoder=UpperCamelCase , decoder=UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase )
# check we can do a forward pass
lowerCamelCase__ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase__ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""t5-base""" )
lowerCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
lowerCamelCase__ : Optional[int] = MusicgenProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
# set the appropriate bos/pad token ids
lowerCamelCase__ : Union[str, Any] = 2048
lowerCamelCase__ : List[str] = 2048
# set other default generation config params
lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCamelCase )
processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_A : List[str] =parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 41
| 0
|
def bubble_sort( list_data , length = 0 ):
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
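# Quick sanity checks (a sketch; each recursive call shrinks the unsorted
# suffix by one, so recursion depth is at most len(list_data)):
assert bubble_sort([4, 1, 3, 2]) == [1, 2, 3, 4]
assert bubble_sort([]) == []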
if __name__ == "__main__":
import doctest
doctest.testmod()
| 130
|
'''simple docstring'''
_A : Union[str, Any] =range(2, 20 + 1)
_A : List[str] =[10**k for k in range(ks[-1] + 1)]
_A : dict[int, dict[int, list[list[int]]]] ={}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
lowerCamelCase__ : List[str] = sum(a_i[j] for j in range(UpperCamelCase , len(UpperCamelCase ) ) )
lowerCamelCase__ : int = sum(a_i[j] * base[j] for j in range(min(len(UpperCamelCase ) , UpperCamelCase ) ) )
lowerCamelCase__ , lowerCamelCase__ : Dict = 0, 0
lowerCamelCase__ : List[str] = n - i
lowerCamelCase__ : Optional[Any] = memo.get(UpperCamelCase )
if sub_memo is not None:
lowerCamelCase__ : str = sub_memo.get(UpperCamelCase )
if jumps is not None and len(UpperCamelCase ) > 0:
# find and make the largest jump without going over
lowerCamelCase__ : Optional[Any] = -1
for _k in range(len(UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCamelCase__ : Dict = _k
break
if max_jump >= 0:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCamelCase__ : Dict = diff + c
for j in range(min(UpperCamelCase , len(UpperCamelCase ) ) ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = divmod(UpperCamelCase , 10 )
if new_c > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
lowerCamelCase__ : Any = []
else:
lowerCamelCase__ : str = {c: []}
lowerCamelCase__ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCamelCase__ , lowerCamelCase__ : Dict = next_term(UpperCamelCase , k - 1 , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = compute(UpperCamelCase , UpperCamelCase , i + dn , UpperCamelCase )
diff += _diff
dn += terms_jumped
lowerCamelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCamelCase__ : List[Any] = 0
while j < len(UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCamelCase , (diff, dn, k) )
return (diff, dn)
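# For orientation: the sequence being jump-accelerated above, computed
# naively (fine for tiny n, hopeless for the target n = 10**15).
def _naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))  # a_{k+1} = a_k + digitsum(a_k)
    return a
# _naive_a(1..6) -> 1, 2, 4, 8, 16, 23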
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCamelCase__ : Optional[Any] = i
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 0, 0
for j in range(len(UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCamelCase__ : Optional[int] = ds_c + ds_b
diff += addend
lowerCamelCase__ : int = 0
for j in range(UpperCamelCase ):
lowerCamelCase__ : str = a_i[j] + addend
lowerCamelCase__ , lowerCamelCase__ : int = divmod(UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
lowerCamelCase__ : List[Any] = digits[j] + addend
if s >= 10:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = divmod(UpperCamelCase , 10 )
lowerCamelCase__ : Any = addend // 10 + quotient
else:
lowerCamelCase__ : Any = s
lowerCamelCase__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowerCamelCase__ , lowerCamelCase__ : Any = divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 10**15 ) -> int:
lowerCamelCase__ : Any = [1]
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Tuple = 0
while True:
lowerCamelCase__ , lowerCamelCase__ : Any = next_term(UpperCamelCase , 20 , i + dn , UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
lowerCamelCase__ : Union[str, Any] = 0
for j in range(len(UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'{solution() = }')
| 41
| 0
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = ['''model.decoder.embed_positions.weights''']
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace("linear1" , "fc1" )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.replace("linear2" , "fc2" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : int = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : Tuple = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : Any = state_dict.pop(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rename_keys(lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : Any = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : str = val
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return state_dict, enc_dec_proj_state_dict
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : int = 1_0_2_4
SCREAMING_SNAKE_CASE_ : int = 2_4
SCREAMING_SNAKE_CASE_ : List[Any] = 1_6
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : Any = 1_5_3_6
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4_8
SCREAMING_SNAKE_CASE_ : Optional[int] = 2_4
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2_0_4_8
SCREAMING_SNAKE_CASE_ : Dict = 4_8
SCREAMING_SNAKE_CASE_ : List[Any] = 3_2
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : Any = MusicgenDecoderConfig(
hidden_size=lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCAmelCase , num_attention_heads=lowerCAmelCase , )
return config
@torch.no_grad()
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict="cpu" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MusicGen.get_pretrained(lowerCAmelCase , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = decoder_config_from_checkpoint(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[int] = rename_state_dict(
lowerCAmelCase , hidden_size=decoder_config.hidden_size )
SCREAMING_SNAKE_CASE_ : str = TaEncoderModel.from_pretrained("t5-base" )
SCREAMING_SNAKE_CASE_ : Tuple = EncodecModel.from_pretrained("facebook/encodec_32khz" )
SCREAMING_SNAKE_CASE_ : Optional[int] = MusicgenForCausalLM(lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(lowerCAmelCase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : Optional[Any] = MusicgenForConditionalGeneration(text_encoder=lowerCAmelCase , audio_encoder=lowerCAmelCase , decoder=lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCAmelCase )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(input_ids=lowerCAmelCase , decoder_input_ids=lowerCAmelCase ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained("t5-base" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
SCREAMING_SNAKE_CASE_ : Optional[int] = MusicgenProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2_0_4_8
SCREAMING_SNAKE_CASE_ : List[str] = 2_0_4_8
# set other default generation config params
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(3_0 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(lowerCAmelCase )
processor.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 18
|
'''simple docstring'''
def greatest_common_divisor (a , b ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative (x , y ) -> int:
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y , x % y
    return abs(x )
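# Worked example of the Euclidean algorithm: gcd(48, 18) ->
# 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0, so the GCD is 6.
assert greatest_common_divisor(48 , 18 ) == 6
assert gcd_by_iterative(48 , 18 ) == 6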
def main () -> None:
    try:
        nums = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a , num_b )}''' )
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print("""Wrong input""" )
if __name__ == "__main__":
main()
| 41
| 0
|
from __future__ import annotations
def binary_search ( a_list , item ) -> bool:
    """simple docstring"""
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
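# An index-based variant avoiding the list slicing above (each slice copies,
# costing O(n) per recursion level); a sketch, not a drop-in replacement:
def binary_search_by_index(a_list, item, low=0, high=None) -> bool:
    high = len(a_list) if high is None else high
    if low >= high:
        return False
    midpoint = (low + high) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_by_index(a_list, item, low, midpoint)
    return binary_search_by_index(a_list, item, midpoint + 1, high)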
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:List[str] = input("""Enter numbers separated by comma:\n""").strip()
SCREAMING_SNAKE_CASE_:Union[str, Any] = [int(item.strip()) for item in user_input.split(""",""")]
SCREAMING_SNAKE_CASE_:Union[str, Any] = int(input("""Enter the number to be found in the list:\n""").strip())
SCREAMING_SNAKE_CASE_:Optional[int] = '''''' if binary_search(sequence, target) else '''not '''
print(F"""{target} was {not_str}found in {sequence}""")
| 116
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[int] = min_resolution
lowerCamelCase__ : Optional[Any] = max_resolution
lowerCamelCase__ : Union[str, Any] = do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20}
lowerCamelCase__ : Dict = do_thumbnail
lowerCamelCase__ : Optional[int] = do_align_axis
lowerCamelCase__ : Any = do_pad
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase ( _lowercase , unittest.TestCase ):
a = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Any = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self: Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCamelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def lowerCamelCase_ ( self: List[str] ):
pass
@is_flaky()
def lowerCamelCase_ ( self: Union[str, Any] ):
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Optional[int] ):
# Initialize image_processing
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def lowerCamelCase_ ( self: Dict ):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 41
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowercase ( __snake_case : int ):
lowercase_ : Tuple = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
lowercase_ : Union[str, Any] = 6
lowercase_ : Dict = 1_2_8
lowercase_ : Any = (2, 2, 1_8, 2)
lowercase_ : Tuple = (4, 8, 1_6, 3_2)
elif "large" in model_name:
lowercase_ : Any = 1_2
lowercase_ : Tuple = 1_9_2
lowercase_ : List[str] = (2, 2, 1_8, 2)
lowercase_ : Union[str, Any] = (6, 1_2, 2_4, 4_8)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowercase_ : str = window_size
lowercase_ : Dict = embed_dim
lowercase_ : Tuple = depths
lowercase_ : str = num_heads
return config
def lowercase ( __snake_case : Union[str, Any] ):
if "encoder.mask_token" in name:
lowercase_ : Any = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowercase_ : List[Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowercase_ : str = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowercase_ : List[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase_ : List[Any] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase_ : Tuple = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase_ : int = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase_ : Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase_ : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowercase_ : Optional[int] = """layernorm.weight"""
if name == "encoder.norm.bias":
lowercase_ : str = """layernorm.bias"""
if "decoder" in name:
pass
else:
lowercase_ : Tuple = """swin.""" + name
return name
def lowercase ( __snake_case : Optional[int] , __snake_case : Union[str, Any] ):
for key in orig_state_dict.copy().keys():
lowercase_ : Tuple = orig_state_dict.pop(__snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowercase_ : Union[str, Any] = key.split('''.''' )
lowercase_ : Optional[Any] = int(key_split[2] )
lowercase_ : Any = int(key_split[4] )
lowercase_ : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase_ : Union[str, Any] = val[:dim, :]
lowercase_ : Tuple = val[
dim : dim * 2, :
]
lowercase_ : Union[str, Any] = val[-dim:, :]
else:
lowercase_ : Union[str, Any] = val[
:dim
]
lowercase_ : int = val[
dim : dim * 2
]
lowercase_ : List[str] = val[
-dim:
]
else:
lowercase_ : Union[str, Any] = val
return orig_state_dict
def lowercase ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : str ):
lowercase_ : Optional[int] = torch.load(__snake_case , map_location='''cpu''' )["""model"""]
lowercase_ : List[str] = get_swin_config(__snake_case )
lowercase_ : str = SwinForMaskedImageModeling(__snake_case )
model.eval()
lowercase_ : List[Any] = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
lowercase_ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ : Dict = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
lowercase_ : Dict = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
lowercase_ : Union[str, Any] = image_processor(images=__snake_case , return_tensors='''pt''' )
with torch.no_grad():
lowercase_ : List[Any] = model(**__snake_case ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A : int = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 33
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_A : Dict ={'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Optional[Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[Any] = emb.weight.shape
lowercase__ : Optional[int] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
lowercase__ : List[Any] = emb.weight.data
return lin_layer
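# Minimal PyTorch sketch of the helper above: an output projection whose
# weight is the token-embedding matrix. nn.Linear stores its weight as
# (out_features, in_features), matching nn.Embedding's (vocab, dim) layout.
import torch.nn as _nn

_emb = _nn.Embedding(10, 4)
_lm_head = _nn.Linear(4, 10, bias=False)
_lm_head.weight.data = _emb.weight.data  # same storage: weights are tied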
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any=None ):
'''simple docstring'''
lowercase__ : str = {}
for old_key in state_dict.keys():
lowercase__ : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowercase__ : List[str] = key.replace('moe_layer.experts.0' , f"""ffn.experts.expert_{expert_idx}""" )
else:
lowercase__ : Dict = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
lowercase__ : Dict = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
lowercase__ : Optional[Any] = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
lowercase__ : List[str] = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
lowercase__ : Tuple = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
lowercase__ : List[Any] = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
lowercase__ : Tuple = key.replace('final_layer_norm' , 'ff_layer_norm' )
lowercase__ : List[str] = state_dict[old_key]
return new_dict
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] = WEIGHTS_NAME ):
'''simple docstring'''
lowercase__ : Optional[Any] = []
lowercase__ : Optional[int] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
for expert in range(_lowerCAmelCase ):
lowercase__ : Optional[Any] = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_lowerCAmelCase ):
lowercase__ : int = torch.load(_lowerCAmelCase )["""model"""]
remove_ignore_keys_(_lowerCAmelCase )
lowercase__ : Optional[Any] = rename_fairseq_keys(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[str] = os.path.join(
_lowerCAmelCase , weights_name.replace('.bin' , f"""-{len(_lowerCAmelCase )+1:05d}-of-???.bin""" ) )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCAmelCase )[0]].dtype )
# Add the last block
lowercase__ : Optional[int] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , f"""-{len(_lowerCAmelCase )+1:05d}-of-???.bin""" ) )
lowercase__ : Dict = torch.load(switch_checkpoint_path + '-shared.pt' )["""model"""]
remove_ignore_keys_(_lowerCAmelCase )
lowercase__ : Dict = rename_fairseq_keys(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCAmelCase ) == 1:
lowercase__ : List[str] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCAmelCase , _lowerCAmelCase )
# Otherwise, let's build the index
lowercase__ : Dict = {}
for idx, shard in enumerate(_lowerCAmelCase ):
lowercase__ : Any = weights_name.replace('.bin' , f"""-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin""" )
lowercase__ : List[Any] = os.path.join(_lowerCAmelCase , weights_name.replace('.bin' , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
for key in shard:
lowercase__ : str = shard_file
# Add the metadata
lowercase__ : int = {"""total_size""": total_size}
lowercase__ : Any = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , 'w' , encoding='utf-8' ) as f:
lowercase__ : Tuple = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + """\n"""
f.write(_lowerCAmelCase )
return metadata, index
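# Shape of the weight index written above (hand-written example values,
# not the output of a real run):
_EXAMPLE_INDEX = {
    "metadata": {"total_size": 123456},
    "weight_map": {
        "decoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}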
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_UpperCamelCase : Dict = parser.parse_args()
_UpperCamelCase : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
_UpperCamelCase : Union[str, Any] = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
_UpperCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 77
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 41
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
__snake_case = {
'''google/pegasus-xsum''': 512,
}
__snake_case = logging.get_logger(__name__)
class _lowerCAmelCase ( _lowercase ):
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        '''simple docstring'''
        return 1
    def _special_token_mask(self, seq) -> List[int]:
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
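# A short, self-contained sketch (not part of the tokenizer file above) of the id-offset
# scheme the class implements: ids 0 and 1 are reserved for <pad> and </s>, ids 2..offset-1
# for the pretraining-only mask/<unk_x> tokens, and every raw SentencePiece id is shifted up
# by `offset`. The dict contents and sample ids below are illustrative only.
def _demo_offset_scheme():
    offset = 103
    encoder = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}
    decoder = {v: k for k, v in encoder.items()}
    sp_id = 42  # pretend SentencePiece assigned this id to some piece
    token_id = sp_id + offset  # what _convert_token_to_id returns for ordinary pieces
    assert token_id - offset == sp_id  # what _convert_id_to_token undoes
    assert decoder["<pad>"] == 0 and encoder[1] == "</s>"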
| 203
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_A : Union[str, Any] =False
class _lowercase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    def lowerCamelCase_ ( self: Optional[int] ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
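# A standalone sketch (an assumed helper, not from the test above) of the slice-comparison
# pattern these pipeline tests use: rather than checking every pixel, compare one small,
# deterministic crop of the output against hardcoded reference values by max absolute error.
import numpy as np

def slices_close(output, expected_slice, tol=1e-2):
    # crop the same 3x3 region of the last channel that the test hardcodes
    actual = output[0, 253:256, 253:256, -1].flatten()
    return bool(np.abs(actual - expected_slice).max() < tol)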
| 41
| 0
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value(self):
        return self.value
    def get_name(self):
        return self.name
    def get_weight(self):
        return self.weight
    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    """simple docstring"""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(items, max_cost, key_func):
    """simple docstring"""
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """simple docstring"""
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
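# A quick usage sketch (not part of the original file): build a three-item menu and run the
# greedy selector with `Things.get_value` as the key, i.e. repeatedly take the highest-value
# item that still fits under the cost budget. The numbers are illustrative.
def _demo_greedy():
    names = ["burger", "pizza", "salad"]
    values = [80.0, 100.0, 30.0]
    weights = [40.0, 60.0, 10.0]
    menu = build_menu(names, values, weights)
    taken, total_value = greedy(menu, 70.0, Things.get_value)
    # pizza (value 100, weight 60) is taken first, then salad (weight 10) still fits: 130.0
    print(taken, total_value)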
| 322
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
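# A hedged usage sketch (not part of the script): once uploaded, the tiny checkpoint can be
# pulled straight from the Hub for fast smoke tests; output quality is meaningless by design.
# Reuses the FSMTTokenizer/FSMTForConditionalGeneration imports from the top of the script.
def _smoke_test_uploaded_model():
    tok = FSMTTokenizer.from_pretrained('''stas/tiny-wmt19-en-ru''')
    model = FSMTForConditionalGeneration.from_pretrained('''stas/tiny-wmt19-en-ru''')
    batch_inputs = tok(['''Making tiny model'''], return_tensors='''pt''')
    generated = model.generate(**batch_inputs, num_beams=1, max_length=8)
    print(tok.batch_decode(generated, skip_special_tokens=True))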
| 41
| 0
|