code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """Tokenizer tests for DistilBERT.

    DistilBERT reuses the BERT WordPiece tokenizer, so the whole BERT test
    suite is inherited; only the special-token sequence builders get an
    integration test of their own.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check [CLS]/[SEP] placement for single sequences and pairs."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequence: [CLS] tokens [SEP]
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        # Pair: [CLS] tokens_a [SEP] tokens_b [SEP]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 5
|
'''simple docstring'''
class TrieNode:
    """A node of a prefix tree (trie) over single characters.

    The root node represents the empty prefix; a node with ``is_leaf`` set
    marks the end of a complete stored word.
    """

    def __init__(self) -> None:
        self.nodes: dict[str, "TrieNode"] = {}  # Mapping from char to TrieNode
        self.is_leaf = False  # True iff the path from the root spells a stored word

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single *word*, creating intermediate nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff *word* was previously inserted (exact match, not prefix)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove *word* from the trie, pruning nodes that become unused.

        Deleting a word that is not present is a silent no-op.
        """

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when *curr* became childless and may be pruned
            # by its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Depth-first print of every complete word stored under *node*.

    *word* is the prefix accumulated on the path from the root to *node*;
    each stored word is printed followed by a single space.
    """
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    """Self-test: exercise insert/find/delete and return True on success."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    # deleting "banana" must not disturb the longer word sharing its prefix
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print *msg* followed by whether the associated check passed."""
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    """Pytest entry point: fail if the trie self-test does not pass."""
    assert test_trie()
def main() -> None:
    """Script entry point: run the trie self-test and report the outcome."""
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 34
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors` in these tests,
# preferring torch, then tf, falling back to jax. The tests below
# (see the batch-integration test) read this as `FRAMEWORK`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for PerceiverTokenizer (a byte-level UTF-8 tokenizer).

    NOTE(review): local names were reconstructed from an obfuscated original;
    each binding follows from the variable's subsequent use in the same method.
    """

    tokenizer_class = PerceiverTokenizer
    # No fast (Rust) tokenizer exists for Perceiver.
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # The tokenizer needs no pretrained files; save a fresh one so that
        # the common-mixin tests can reload it from `self.tmpdirname`.
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        """Pretrained tokenizer used by the integration tests (cached)."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build (text, ids) whose decode/encode round-trip is the identity.

        Only ids that decode to plain ASCII letters/spaces and re-encode to
        themselves are kept, then the list is clipped/duplicated to fit
        within [min_length, max_length].
        """
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # Undecodable single bytes are skipped; note the previous
                # `tok` is then appended again (kept from the original —
                # harmless, as the filters below drop unusable entries).
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        """Multi-byte UTF-8 characters must round-trip through encode/decode."""
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        """Padding a batch must produce the expected ids/shapes for FRAMEWORK."""
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        """Encoder-only batches carry no decoder inputs."""
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        """`max_length` padding/truncation of targets yields fixed-width ids."""
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        """Saving then reloading must preserve encodings and added tokens."""
        # Safety check on `model_max_length`: the value 42 used below must not
        # be the default, otherwise the reload assertion would be vacuous.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """`additional_special_tokens` in the saved config files must be honored
        on reload and must be overridable via `from_pretrained`."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        """A lone continuation byte decodes to the Unicode replacement char."""
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained-tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized for a byte-level tokenizer
    def test_pretokenized_inputs(self):
        pass

    # vocabulary doesn't exist, so id<->token conversion reversibility is not applicable
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tests use tokens invalid for Perceiver, which only accepts one-character strings
        # and special symbols.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoints and their hosted configuration files.
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration for the EnCodec neural audio codec model.

    Stores the encoder/decoder convolution hyperparameters, the residual
    vector-quantizer settings, and the optional chunked-streaming windowing
    (``chunk_length_s`` / ``overlap``).
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # kbps options (mutable default kept for interface compat)
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,  # None => process whole clips, no chunking
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",  # "weight_norm" or "time_group_norm"
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,  # defaults to hidden_size below
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}"
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples (at least 1), or None when disabled."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        """Codec frames per second: sampling rate over the total hop length."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        """Number of RVQ codebooks needed to reach the highest target bandwidth."""
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 34
| 0
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds small configs/inputs for TimmBackbone model tests.

    NOTE(review): names restored from an obfuscated original; this class must
    be named `TimmBackboneModelTester` because the test class instantiates it
    under that name in `setUp`.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        # Default to the last stage only when no indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with a random image batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Forward a batch and check the final feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for TimmBackbone (timm checkpoints wrapped as HF backbones)."""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        """A timm backbone and the equivalent transformers backbone must agree
        on stage/channel structure and on explicitly requested out_indices."""
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 39
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
class _a ( __a ):
__a : str = ["""pixel_values"""]
def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
'''simple docstring'''
UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowercase ):
UpperCAmelCase = target_sizes.numpy()
UpperCAmelCase = []
for idx in range(len(lowercase ) ):
UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
UpperCAmelCase = logits.argmax(dim=1 )
UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowercase :
    """Helper that builds a tiny RoFormer config plus dummy inputs and runs
    per-head shape checks for the TF RoFormer test suite.

    NOTE(review): obfuscation damage — `__init__` repeats the parameter name
    `_a` (a SyntaxError in real Python) and its assignments lost their `self.`
    targets, while the methods below read `self.batch_size`, `self.vocab_size`,
    etc. Restore the original `self.<attr> = ...` assignments before use.
    """

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[int]:
        # NOTE(review): the left-hand sides below were presumably
        # `self.parent`, `self.batch_size`, `self.seq_length`, ... — confirm.
        _A : Tuple = parent
        _A : str = 13
        _A : int = 7
        _A : Dict = True
        _A : Optional[int] = True
        _A : str = True
        _A : Any = True
        _A : str = 99
        _A : int = 32
        _A : Any = 2
        _A : Union[str, Any] = 4
        _A : List[str] = 37
        _A : Tuple = """gelu"""
        _A : Optional[Any] = 0.1
        _A : List[str] = 0.1
        _A : Any = 512
        _A : Union[str, Any] = 16
        _A : Optional[Any] = 2
        _A : Union[str, Any] = 0.02
        _A : str = 3
        _A : Optional[Any] = 4
        _A : Tuple = None

    # Build (config, input_ids, token_type_ids, input_mask, labels...) dummies.
    def a__ ( self ) -> Optional[int]:
        _A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A : Union[str, Any] = None
        if self.use_input_mask:
            _A : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        _A : List[Any] = None
        if self.use_token_type_ids:
            _A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A : Union[str, Any] = None
        _A : List[str] = None
        _A : Any = None
        if self.use_labels:
            _A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        _A : Union[str, Any] = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Base model: check last_hidden_state shape for dict and list inputs.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
        _A : List[Any] = TFRoFormerModel(config=_a )
        _A : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        _A : Optional[Any] = [input_ids, input_mask]
        _A : Optional[int] = model(_a )
        _A : Tuple = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Causal-LM head: check logits shape.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
        _A : List[str] = True
        _A : List[str] = TFRoFormerForCausalLM(config=_a )
        _A : Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        _A : int = model(_a )["""logits"""]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    # Masked-LM head: check logits shape.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
        _A : Optional[int] = TFRoFormerForMaskedLM(config=_a )
        _A : Optional[int] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        _A : Dict = model(_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Sequence-classification head: check logits shape.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[Any]:
        _A : str = self.num_labels
        _A : Tuple = TFRoFormerForSequenceClassification(config=_a )
        _A : Dict = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        _A : List[Any] = model(_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Multiple-choice head: tile inputs per choice, check logits shape.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
        _A : int = self.num_choices
        _A : Dict = TFRoFormerForMultipleChoice(config=_a )
        _A : str = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
        _A : List[str] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
        _A : Dict = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
        _A : List[str] = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        _A : int = model(_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Token-classification head: check logits shape.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
        _A : List[str] = self.num_labels
        _A : Union[str, Any] = TFRoFormerForTokenClassification(config=_a )
        _A : Any = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        _A : List[Any] = model(_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Question-answering head: check start/end logits shapes.
    def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
        _A : List[Any] = TFRoFormerForQuestionAnswering(config=_a )
        _A : Optional[int] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        _A : Union[str, Any] = model(_a )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Repack the prepared inputs into the dict form the common tests expect.
    # NOTE(review): the parenthesised unpacking below is annotated with
    # `: Tuple`, which is not a valid annotation target, and it reads
    # `config_and_inputs` / `input_ids` / ... that were never bound.
    def a__ ( self ) -> List[str]:
        _A : Any = self.prepare_config_and_inputs()
        (
            (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) , (
                _A
            ) ,
        ) : Tuple = config_and_inputs
        _A : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class lowercase ( __a,__a,unittest.TestCase ):
    """Common TF RoFormer model tests: wires the tester above into the shared
    model/pipeline mixins and adds one smoke test per head.

    NOTE(review): all class attributes were collapsed to the single name `_a`
    by obfuscation, so only the last assignment survives at runtime.
    """

    # All TF RoFormer heads under test (empty tuple when TF is unavailable).
    _a = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    _a = (
        {
            """feature-extraction""": TFRoFormerModel,
            """fill-mask""": TFRoFormerForMaskedLM,
            """question-answering""": TFRoFormerForQuestionAnswering,
            """text-classification""": TFRoFormerForSequenceClassification,
            """text-generation""": TFRoFormerForCausalLM,
            """token-classification""": TFRoFormerForTokenClassification,
            """zero-shot""": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _a = False
    _a = False

    # Skip the text-generation pipeline case for this architecture.
    # NOTE(review): reads `pipeline_test_casse_name`, which is not among the
    # (all `_a`-named) parameters — confirm the intended parameter name.
    def a__ ( self , _a , _a , _a , _a , _a ) -> str:
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    # setUp: build the model tester and the config tester.
    def a__ ( self ) -> str:
        _A : Any = TFRoFormerModelTester(self )
        _A : Optional[int] = ConfigTester(self , config_class=_a , hidden_size=37 )

    def a__ ( self ) -> List[Any]:
        self.config_tester.run_common_tests()

    def a__ ( self ) -> List[str]:
        _A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def a__ ( self ) -> Tuple:
        _A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_a )

    def a__ ( self ) -> Optional[Any]:
        _A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*_a )

    def a__ ( self ) -> List[str]:
        _A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_a )

    def a__ ( self ) -> Dict:
        _A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_a )

    def a__ ( self ) -> int:
        _A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_a )

    def a__ ( self ) -> Union[str, Any]:
        _A : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_a )

    # Slow test: make sure the pretrained checkpoint loads.
    @slow
    def a__ ( self ) -> Dict:
        _A : List[Any] = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
        self.assertIsNotNone(_a )
@require_tf
class lowercase ( unittest.TestCase ):
    """Slow integration test: checks masked-LM logits of the pretrained
    Chinese RoFormer base checkpoint against hard-coded reference values."""

    @slow
    def a__ ( self ) -> List[Any]:
        _A : str = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        # A trivial 6-token input; only the logits shape/values matter here.
        _A : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _A : str = model(_a )[0]
        # TODO Replace vocab size
        _A : Union[str, Any] = 5_0000
        _A : Any = [1, 6, vocab_size]
        self.assertEqual(output.shape , _a )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        _A : List[str] = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-4 )
@require_tf
class lowercase ( unittest.TestCase ):
    """Unit tests for `TFRoFormerSinusoidalPositionalEmbedding` against
    closed-form sin/cos reference values."""

    # Maximum absolute difference tolerated in the numeric comparisons.
    # NOTE(review): read below as `self.tolerance`, but the attribute name was
    # collapsed to `_a` by obfuscation.
    _a = 1e-4

    # Small embedding table must match hand-computed sin/cos values.
    def a__ ( self ) -> Dict:
        _A : Optional[Any] = tf.constant([[4, 10]] )
        _A : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        _A : Union[str, Any] = emba(input_ids.shape )
        _A : Union[str, Any] = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(_a , _a , atol=self.tolerance )

    # First rows/cols of a large (512 x 512) table must match reference values.
    def a__ ( self ) -> Optional[Any]:
        _A : int = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        _A : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        _A : int = emba.weight[:3, :5]
        tf.debugging.assert_near(_a , _a , atol=self.tolerance )
@require_tf
class lowercase ( unittest.TestCase ):
    """Unit test for `TFRoFormerSelfAttention.apply_rotary_position_embeddings`
    against hard-coded reference slices."""

    # Maximum absolute difference tolerated in the numeric comparisons
    # (read below as `self.tolerance` — name collapsed to `_a` by obfuscation).
    _a = 1e-4

    def a__ ( self ) -> Tuple:
        # Deterministic ramp-valued query/key tensors of shape (2, 12, 16, 64).
        _A : List[Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        _A : Tuple = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        _A : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        _A : Union[str, Any] = embed_positions([2, 16, 768] )[None, None, :, :]
        _A , _A : str = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            _a , _a , _a )
        # Expected rotated query slice [0, 0, :6, :8].
        _A : Any = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        # Expected rotated key slice [0, 0, :6, :8] (negated query pattern).
        _A : Dict = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
| 26
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (preds , labels ):
    """Return the fraction of positions where `preds` equals `labels`.

    Both arguments are numpy arrays of the same shape (element-wise `==`
    followed by `.mean()`).

    NOTE(review): the obfuscated original declared two parameters both named
    `_a` (a SyntaxError) while the body read `preds`/`labels`; names restored.
    """
    return (preds == labels).mean()
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
    """Arguments pertaining to the data the model is trained/evaluated on.

    NOTE(review): reconstructed — the obfuscated original declared every field
    under the same name `__a` with defaults referencing the undefined name
    `__a`; field names restored per the standard HF example script.
    """

    # Required: one of the registered multiple-choice tasks.
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    # Required: directory containing the task's data files.
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
    """Fine-tune and evaluate a multiple-choice model via the HF Trainer
    (example-script `main`).

    NOTE(review): obfuscation collapsed most assignment targets to
    `UpperCAmelCase`, so the names read below — `parser`, `model_args`,
    `data_args`, `training_args`, `processor`, `label_list`, `num_labels`,
    `config`, `tokenizer`, `model`, `train_dataset`, `eval_dataset`,
    `data_collator`, `trainer`, `results`, `result`, `output_eval_file`, and
    `p`/`preds` in `compute_metrics` — are never bound. Restore the original
    targets before running.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to overwrite a non-empty output directory unless explicitly asked.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , _a )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor and its label set.
    try:
        UpperCAmelCase = processors[data_args.task_name]()
        UpperCAmelCase = processor.get_labels()
        UpperCAmelCase = len(_a )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    # Accuracy from argmax over the per-choice prediction scores.
    def compute_metrics(_a : EvalPrediction ) -> Dict:
        UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}

    # Data collator
    UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(_a , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , _a , _a )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(_a )
    return results
def snake_case_ (_a : Optional[int] ):
    """Entry point for `xla_spawn` (TPU); the index argument is required by the
    spawner and is intentionally unused."""
    # For xla_spawn (TPUs)
    # NOTE(review): `main` is not defined in this file after obfuscation — the
    # training entry point above was renamed `snake_case_`; confirm the target.
    main()


if __name__ == "__main__":
    main()
| 34
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
def _lowerCamelCase ( model_name ):
    """Build an `ASTConfig` for the given original checkpoint name.

    Sets the time/frequency strides for the `12-12`/`14-14`/`16-16` variants,
    the label count and id2label/label2id maps (Audioset vs. speech-commands),
    and `max_length` for the speech-commands variant.

    NOTE(review): reconstructed — the obfuscated original collapsed every
    assignment target to `_a`, leaving `config` and `idalabel` undefined and
    `int(_a)` keying the label map; restored per the upstream conversion
    script. Config attribute names (`time_stride`, `frequency_stride`,
    `max_length`) should be confirmed against `ASTConfig`.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported" )
    # Fetch the human-readable label mapping from the hub.
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def _lowerCamelCase ( lowercase : Tuple ) -> List[str]:
if "module.v" in name:
_a = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
_a = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
_a = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
_a = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_a = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_a = name.replace("attn" , "attention.self" )
if "norm1" in name:
_a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_a = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_a = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_a = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
_a = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def _lowerCamelCase ( lowercase : Dict , lowercase : List[Any] ) -> Any:
    """Re-key an original AST state dict into HF layout, splitting fused qkv
    tensors into query/key/value thirds along dim 0.

    NOTE(review): obfuscation damage — the signature repeats the name
    `lowercase` (a SyntaxError) while the body reads `orig_state_dict`,
    `config`, `key_split`, `val` and `dim` that are never bound (each
    assignment target was collapsed to `_a`), and the destination state-dict
    keys for the query/key/value slices were lost entirely. As written this
    pops every key and returns an emptied dict — reconstruct against the
    upstream conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        # presumably `val = orig_state_dict.pop(key)` originally — TODO confirm
        _a = orig_state_dict.pop(_a )
        if "qkv" in key:
            _a = key.split("." )
            # key_split[3] is the transformer layer index in the original names
            _a = int(key_split[3] )
            _a = config.hidden_size
            if "weight" in key:
                # fused qkv weight: first/middle/last `dim` rows are q/k/v
                _a = val[:dim, :]
                _a = val[dim : dim * 2, :]
                _a = val[-dim:, :]
            else:
                # fused qkv bias: same three-way split
                _a = val[:dim]
                _a = val[dim : dim * 2]
                _a = val[-dim:]
        else:
            # presumably `orig_state_dict[rename_key(key)] = val` originally
            _a = val
    return orig_state_dict
def _lowerCamelCase ( lowercase : Tuple ) -> str:
_a = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(_a , _a )
@torch.no_grad()
def _lowerCamelCase ( lowercase : int , lowercase : Union[str, Any] , lowercase : Dict=False ) -> List[Any]:
    """Download an original AST checkpoint, convert it to HF format, verify the
    logits on a sample audio input, and optionally save and/or push the result.

    NOTE(review): obfuscation damage — the signature repeats the name
    `lowercase` (a SyntaxError) while the body reads `model_name`,
    `pytorch_dump_folder_path` and `push_to_hub`; most assignment targets were
    collapsed to `_a` (so e.g. `model`, `feature_extractor`, `waveform`,
    `inputs`, `outputs`, `logits`, `expected_slice` are never bound), and the
    helper names `get_audio_spectrogram_transformer_config`, `remove_keys`,
    `convert_state_dict` no longer exist in this file (all defs were renamed
    `_lowerCamelCase`). Reconstruct against the upstream script before running.
    """
    _a = get_audio_spectrogram_transformer_config(_a )
    # Dropbox URLs of the original checkpoints, keyed by model name.
    _a = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    _a = model_name_to_url[model_name]
    _a = torch.hub.load_state_dict_from_url(_a , map_location="cpu" )
    # remove some keys
    remove_keys(_a )
    # rename some keys
    _a = convert_state_dict(_a , _a )
    # load 🤗 model
    _a = ASTForAudioClassification(_a )
    model.eval()
    model.load_state_dict(_a )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    _a = -4.2_67_73_93 if "speech-commands" not in model_name else -6.84_59_78
    _a = 4.5_68_99_74 if "speech-commands" not in model_name else 5.5_65_45_26
    _a = 1024 if "speech-commands" not in model_name else 128
    _a = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a )
    if "speech-commands" in model_name:
        _a = load_dataset("speech_commands" , "v0.02" , split="validation" )
        _a = dataset[0]["audio"]["array"]
    else:
        _a = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        _a , _a = torchaudio.load(_a )
        _a = waveform.squeeze().numpy()
    _a = feature_extractor(_a , sampling_rate=1_6000 , return_tensors="pt" )
    # forward pass
    _a = model(**_a )
    _a = outputs.logits
    # Per-checkpoint reference slices of the first three logits.
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        _a = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        _a = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        _a = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        _a = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        _a = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        _a = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        _a = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        _a = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ):
        raise ValueError("Logits don\'t match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(_a ).mkdir(exist_ok=_a )
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(_a )
        print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
        feature_extractor.save_pretrained(_a )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F'MIT/{model_name}' )
        feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
    # Command-line entry point for the AST conversion script.
    # NOTE(review): the parser and parsed-args targets were collapsed to
    # `lowerCAmelCase_` by obfuscation, so the `parser.add_argument(...)` and
    # `args.*` reads below are unbound names.
    lowerCAmelCase_ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    lowerCAmelCase_ : Optional[int] = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 63
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
    r"""
    Processor that wraps a Chinese-CLIP image processor and a BERT tokenizer
    into a single callable.

    NOTE(review): reconstructed — the obfuscated original declared three class
    attributes under the same name `__a` and four methods under the same name
    `A` (only the last of each would survive), dropped the
    `encoding["pixel_values"]` assignment in `__call__`, and built the
    images-only `BatchEncoding` from the wrong object. Attribute/method names
    restored per the `ProcessorMixin` contract.
    """

    # Names consumed by ProcessorMixin machinery.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Deprecated alias kept for backward compatibility.
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        # Backward-compat attribute for the deprecated feature-extractor API.
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize `text` and/or preprocess `images` (at least one required).
        When both are given, the pixel values are merged into the text
        encoding so a single `BatchEncoding` is returned."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Bug fix: the obfuscated original discarded this assignment,
            # returning an encoding without pixel values.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
| 34
| 0
|
"""simple docstring"""
def A_(input_num: int) -> int:
    """Return the sum of the proper divisors of *input_num*.

    A proper divisor is any divisor strictly smaller than the number itself;
    only candidates up to input_num // 2 can divide it.

    Args:
        input_num: a positive integer.

    Raises:
        ValueError: if the input is not an int or is not positive.
    """
    # The original body referenced `input_num` although the parameter had a
    # different (unusable) name; the parameter is renamed to match the body.
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 66
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# NOTE(review): in the original both objects were bound to the same name `A`,
# so the archive map silently shadowed the logger. The logger gets its
# conventional name; the map keeps the surviving binding `A` so any later
# reference to `A` still resolves to the same object it did before.
logger = logging.get_logger(__name__)

A = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class _a(PretrainedConfig):
    """Configuration for a Marian encoder-decoder model (this class is the
    obfuscated `MarianConfig`).

    The original `__init__` reused one parameter name for every argument
    (a SyntaxError) while the body referenced the real configuration names;
    the standard Marian parameter names and defaults are restored.
    The class attributes are restored to the names `PretrainedConfig`
    machinery requires (`model_type`, `keys_to_ignore_at_inference`,
    `attribute_map`).
    """

    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Decoder defaults to sharing the encoder vocabulary.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _a(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Marian (obfuscated `MarianOnnxConfig`).

    In the original every method was named `A`, so only the last one
    survived on the class, and every signature duplicated one parameter
    name (a SyntaxError). The real method names are restored — they are
    the names the class's own bodies call
    (`_generate_dummy_inputs_for_encoder_and_decoder`, etc.) and the
    `OnnxConfig` API requires (`inputs`, `outputs`, `generate_dummy_inputs`).
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )
            if self.use_past:
                # With a past, the decoder consumes a single new token per step.
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[f"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's outputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[f"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build dummy encoder+decoder inputs (and past key/values if enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs: a single step when a past is supplied.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build dummy decoder-only inputs (and past key/values if enabled)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Tokenize a dummy batch sized by the (possibly dynamic) axes."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Dispatch to the task-appropriate dummy-input builder."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten a past key/value entry, choosing the seq2seq or causal layout.

        NOTE(review): like the upstream Bart implementation this mutates
        `flattened_output` in place and deliberately returns None.
        """
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        # Looser tolerance than the 1e-5 default: Marian export drifts slightly.
        return 1E-4
| 34
| 0
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# NOTE(review): all of these were bound to the single name `__lowerCamelCase`,
# each shadowing the previous one, while the modeling code below references the
# conventional names (`_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`, ...). The
# conventional names are restored so those references resolve.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """PoolFormerConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """sail/poolformer_s12"""
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """sail/poolformer_s12"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """sail/poolformer_s12""",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, for use in residual blocks.

    The original duplicated one parameter name three times (a SyntaxError)
    while the body and the caller at `PoolFormerDropPath.forward` reference
    `drop_path(input, drop_prob, training)`; the real names are restored.

    Args:
        input: tensor whose leading dimension is the batch.
        drop_prob: probability of zeroing each sample's path.
        training: apply dropping only in training mode.

    Returns:
        The input unchanged when inactive, otherwise the input rescaled by
        1/keep_prob with whole samples randomly zeroed.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Per-sample stochastic-depth module wrapping `drop_path`.

    Restored from `class A__` — every PoolFormer class in the original was
    named `A__` (each shadowing the previous), while the rest of the file
    instantiates `PoolFormerDropPath(...)`. The forward method is restored
    from `__UpperCamelCase` so `nn.Module.__call__` can find it.
    """

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "PoolFormerDropPath(p=0.1)".
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Patch embedding: a strided Conv2d projection plus optional norm.

    Restored from `class A__`; `PoolFormerEncoder` instantiates this by
    keyword (`patch_size=`, `stride=`, `padding=`, `num_channels=`,
    `hidden_size=`), which fixes the parameter names. `nn.Convad` (which
    does not exist in torch) is corrected to `nn.Conv2d`.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept int or (h, w) pairs for each geometry argument.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group (i.e. LayerNorm over channels
    of a [B, C, H, W] tensor). Restored from `class A__`; the rest of the
    file references `PoolFormerGroupNorm`.
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """Token mixer: average pooling minus the identity (the residual is
    added back by the enclosing layer). Restored from `class A__`;
    `nn.AvgPoolad` is corrected to `nn.AvgPool2d` and the obfuscated
    `count_include_pad` flag to False so padded border pixels do not bias
    the average.
    """

    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """Channel MLP implemented with two 1x1 convolutions, with activation
    and drop-path between/after them. Restored from `class A__`; the two
    convolutions were both assigned to the same obfuscated attribute, so
    they are given distinct names (`conv1`, `conv2`) matching the
    expand/contract order used in forward.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # hidden_act may be a string key into the activation table or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling token-mixer and channel MLP, each with a
    pre-norm, a residual connection and optional per-channel layer scaling.
    Restored from `class A__`; parameter names are fixed by the keyword call
    in `PoolFormerEncoder`.
    """

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learned per-channel residual scaling (one vector per sub-block).
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Hierarchical encoder: per-stage patch embeddings followed by stacks of
    `PoolFormerLayer`s, with a linearly increasing stochastic-depth rate.
    Restored from `class A__`; `PoolFormerPreTrainedModel` and
    `PoolFormerModel` reference this name.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Stochastic depth decay rule: linearly spaced drop rates over all layers.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # Patch embeddings, one per stage; stage 0 consumes the raw channels.
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks, grouped per stage.
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight initialization and pretrained loading.

    Restored from `class A__(__a)` — the undefined base `__a` is replaced by
    the imported `PreTrainedModel`, and the class attributes get the names
    the `PreTrainedModel` machinery reads (`config_class`,
    `base_model_prefix`, ...).
    """

    config_class = PoolFormerConfig
    base_model_prefix = """poolformer"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Truncated-normal-style init for projections; identity init for norms.
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the obfuscated body assigned `value` to a throwaway
        # local; the conventional behavior is to set the flag on the encoder.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# NOTE(review): both strings were bound to the same obfuscated name with their
# newlines flattened to literal "\n" escapes inside raw strings; the
# conventional names (referenced by the docstring decorators below) and the
# multi-line layout are restored.
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    """Headless PoolFormer backbone. Restored from `class A__(__a)`; the
    classification head below instantiates `PoolFormerModel(config)`.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): this mirrors upstream, but no `self.embeddings`
        # attribute is assigned in __init__ — confirm against callers.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            # (last_hidden_state, pooler_output=None, *rest) tuple layout.
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    """A single dense projection over the final hidden states.
    Restored from `class A__`.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    '\n    PoolFormer Model transformer with an image classification head on top\n    ',
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    """PoolFormer backbone plus a linear classifier over the spatially pooled
    final feature map. Restored from `class A__(__a)`.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head; identity when there are no labels to predict.
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Global average over the spatial dims, then classify.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            # Infer the problem type once, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 52
|
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the integers in num.txt.

    The `__main__` guard below calls `solution()`, but the obfuscated def was
    named `snake_case_` and referenced an undefined `_a`; the name and the
    path construction (relative to this file's directory) are restored.
    """
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 34
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Test helper that holds image-processor settings and computes expected
    resized dimensions. Restored from `class a` — the test class below
    instantiates `ConditionalDetrImageProcessingTester(self)`, and the
    original `__init__` reused one parameter name for every argument
    (a SyntaxError). Mutable list defaults are replaced with None sentinels.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize to.

        Single image: scale the shorter side to `size["shortest_edge"]`.
        Batched: the per-image maxima over each dimension (post-padding size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a ( __a , unittest.TestCase ):
UpperCamelCase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =ConditionalDetrImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """size""" ) )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_: str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_: int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Optional[Any] =image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def lowerCamelCase__ ( self : Optional[int] ) -> int:
        '''Check the image processor on PyTorch tensor inputs: un-batched and
        batched calls must both yield pixel values of the expected shape.

        NOTE(review): results are bound to the throwaway name
        ``SCREAMING_SNAKE_CASE_`` while later lines read ``image_inputs`` and
        ``encoded_images`` -- those bindings appear to have been lost in a
        mechanical rewrite; confirm against the original test before use.
        '''
        SCREAMING_SNAKE_CASE_: List[Any] =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_: Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        SCREAMING_SNAKE_CASE_: str =image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def lowerCamelCase__ ( self : Dict ) -> int:
        '''Integration check against a real COCO detection sample: encoding the
        fixture image + annotations must reproduce known pixel values, areas,
        boxes, ids, crowd flags, class labels and sizes.

        NOTE(review): results are bound to the throwaway name
        ``SCREAMING_SNAKE_CASE_`` while later lines read ``target``,
        ``image_processing`` and ``encoding`` -- those bindings appear to
        have been lost in a mechanical rewrite; confirm before relying on
        this test.
        '''
        # fixture image + its COCO-format annotations
        SCREAMING_SNAKE_CASE_: Tuple =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            SCREAMING_SNAKE_CASE_: List[str] =json.loads(f.read() )
        SCREAMING_SNAKE_CASE_: Any ={"""image_id""": 3_9769, """annotations""": target}
        # encode them
        SCREAMING_SNAKE_CASE_: Optional[Any] =ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
        SCREAMING_SNAKE_CASE_: int =image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors="""pt""" )
        # verify pixel values
        SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4 ) )
        # verify area
        SCREAMING_SNAKE_CASE_: Any =torch.tensor([5_8_8_7.9_6_0_0, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase ) )
        # verify boxes
        SCREAMING_SNAKE_CASE_: int =torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        SCREAMING_SNAKE_CASE_: Tuple =torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase ) )
        # verify is_crowd
        SCREAMING_SNAKE_CASE_: Any =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase ) )
        # verify class_labels
        SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase ) )
        # verify orig_size
        SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase ) )
        # verify size
        SCREAMING_SNAKE_CASE_: str =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase ) )
    @slow
    def lowerCamelCase__ ( self : str ) -> Optional[int]:
        '''Integration check against a real COCO *panoptic* sample: encoding
        the fixture image + segments must reproduce known pixel values,
        areas, boxes, ids, labels, mask sums and sizes.

        NOTE(review): results are bound to the throwaway name
        ``SCREAMING_SNAKE_CASE_`` while later lines read ``target``,
        ``image_processing`` and ``encoding`` -- those bindings appear to
        have been lost in a mechanical rewrite; confirm before relying on
        this test.
        '''
        # fixture image + its panoptic annotation file and masks directory
        SCREAMING_SNAKE_CASE_: Tuple =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            SCREAMING_SNAKE_CASE_: Any =json.loads(f.read() )
        SCREAMING_SNAKE_CASE_: str ={"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
        SCREAMING_SNAKE_CASE_: List[Any] =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        SCREAMING_SNAKE_CASE_: List[str] =ConditionalDetrImageProcessor(format="""coco_panoptic""" )
        SCREAMING_SNAKE_CASE_: Tuple =image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors="""pt""" )
        # verify pixel values
        SCREAMING_SNAKE_CASE_: List[Any] =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4 ) )
        # verify area
        SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase ) )
        # verify boxes
        SCREAMING_SNAKE_CASE_: Dict =torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: int =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1E-3 ) )
        # verify image_id
        SCREAMING_SNAKE_CASE_: int =torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase ) )
        # verify is_crowd
        SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase ) )
        # verify class_labels
        SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase ) )
        # verify masks
        SCREAMING_SNAKE_CASE_: List[Any] =82_2873
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCAmelCase )
        # verify orig_size
        SCREAMING_SNAKE_CASE_: Optional[int] =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase ) )
        # verify size
        SCREAMING_SNAKE_CASE_: Any =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase ) )
| 173
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
A =logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
# Task name -> Auto* class used to instantiate the backbone model.
# NOTE(review): the single name ``A`` is rebound four times below, yet later
# code reads ``MODEL_MODES``, ``arg_to_scheduler``,
# ``arg_to_scheduler_choices`` and ``arg_to_scheduler_metavar`` -- those
# original bindings appear to have been lost in a mechanical rewrite.
A ={
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}
# Scheduler name (CLI choice) -> factory from transformers.optimization.
# update this and the import above to support new schedulers from transformers.optimization
A ={
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Sorted choices and a "{a, b, ...}" metavar string for argparse help output.
A =sorted(arg_to_scheduler.keys())
A ='{' + ', '.join(arg_to_scheduler_choices) + '}'
class _a ( pl.LightningModule ):
    """Generic ``LightningModule`` wrapper around a Hugging Face transformer.

    Builds config / tokenizer / model from ``hparams`` (an ``argparse``
    namespace) unless explicit instances are passed, wires up the optimizer
    and LR scheduler, and exposes the standard dataloader hooks.

    NOTE(review): this file looks machine-rewritten. Every ``self.<attr> =``
    assignment has been replaced by a binding to the throwaway local
    ``UpperCAmelCase`` (so the config/tokenizer/model built in ``__init__``
    are discarded), most methods share the name ``A`` (later definitions
    shadow earlier ones), and several signatures repeat the parameter name
    ``lowercase``, which is a SyntaxError in Python. Restore from the
    upstream ``lightning_base.py`` before use.
    """
    def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ):
        '''Create config, tokenizer and model (or adopt the given instances).'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase )
        UpperCAmelCase = 0
        UpperCAmelCase = Path(self.hparams.output_dir )
        UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , )
        else:
            UpperCAmelCase = config
        # dropout/layerdrop overrides that may be supplied via hparams and must
        # be copied onto the model config before the model is built
        UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase , lowercase ):
                assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , lowercase , getattr(self.hparams , lowercase ) )
        if tokenizer is None:
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , )
        else:
            UpperCAmelCase = tokenizer
        UpperCAmelCase = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , )
        else:
            UpperCAmelCase = model
    def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ):
        '''Reload the underlying HF model from a checkpoint path.

        NOTE(review): the loaded model is bound to a discarded local --
        presumably this should rebind ``self.model``.
        '''
        UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase )
    def A ( self : Tuple ):
        '''Build the LR scheduler selected by ``hparams.lr_scheduler`` and
        return it in the dict form pytorch-lightning expects.'''
        UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler
    def A ( self : str ):
        '''Prepare optimizer (AdamW or Adafactor) and schedule, applying weight
        decay to everything except biases and LayerNorm weights.'''
        UpperCAmelCase = self.model
        UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
        UpperCAmelCase = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            UpperCAmelCase = Adafactor(
                lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase )
        else:
            UpperCAmelCase = AdamW(
                lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        UpperCAmelCase = optimizer
        UpperCAmelCase = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def A ( self : List[Any] , lowercase : int , lowercase : List[str] ):
        '''Test step simply reuses the validation step.'''
        return self.validation_step(lowercase , lowercase )
    def A ( self : List[Any] , lowercase : Tuple ):
        '''Test epoch end simply reuses the validation aggregation.'''
        return self.validation_end(lowercase )
    def A ( self : List[Any] ):
        '''Total number of optimizer steps for the whole training run,
        used by the LR scheduler.'''
        UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
        UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def A ( self : List[str] , lowercase : Any ):
        '''Lightning ``setup`` hook: record the dataset size for the stage.'''
        if stage == "test":
            UpperCAmelCase = len(self.test_dataloader().dataset )
        else:
            UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase )
            UpperCAmelCase = len(self.train_dataloader().dataset )
    def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
        '''Abstract: subclasses must build a DataLoader for the given split.'''
        raise NotImplementedError('''You must implement this for your task''' )
    def A ( self : Union[str, Any] ):
        '''Return the cached training DataLoader (built in ``setup``).'''
        return self.train_loader
    def A ( self : Optional[Any] ):
        '''Return the validation ("dev") DataLoader.'''
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase )
    def A ( self : List[Any] ):
        '''Return the test DataLoader.'''
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase )
    def A ( self : Any , lowercase : Union[str, Any] ):
        '''Path of the cached-features file for a given split, keyed by model
        name and max sequence length.'''
        return os.path.join(
            self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
                lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
    @pl.utilities.rank_zero_only
    def A ( self : List[str] , lowercase : Dict[str, Any] ):
        '''On checkpoint save (rank zero only): also export model + tokenizer
        with ``save_pretrained`` into ``<output_dir>/best_tfmr``.'''
        UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' )
        UpperCAmelCase = self.step_count
        self.model.save_pretrained(lowercase )
        self.tokenizer.save_pretrained(lowercase )
    @staticmethod
    def A ( lowercase : Optional[int] , lowercase : List[str] ):
        '''Add model-specific CLI arguments (model/config/tokenizer paths,
        regularization overrides, optimizer and batch-size settings).'''
        parser.add_argument(
            '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--adafactor''' , action='''store_true''' )
class _a ( pl.Callback ):
    def A ( self , trainer , pl_module ):
        """Initialize the RAG retriever once, on the master worker only.

        The original signature repeated the parameter name ``lowercase``
        (a SyntaxError) while the body read ``trainer`` / ``pl_module``;
        restoring the parameter names fixes both. Positional call order is
        unchanged, so callers are unaffected.
        """
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _a ( pl.Callback ):
    def A ( self , trainer , pl_module ):
        """Print the name of every RAG parameter whose gradient is ``None``.

        The original signature repeated the parameter name ``lowercase``
        (a SyntaxError) and the body read the undefined name ``pl_module``;
        the loop prints the offending parameter's name, which the original
        ``print(lowercase)`` could not reference.
        """
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class _a ( pl.Callback ):
    """Logging callback: learning rates per step, plus metric dumps at
    validation and test end.

    NOTE(review): all three hooks are named ``A`` so only the last definition
    survives, the last two signatures repeat the parameter name ``lowercase``
    (a SyntaxError), and results are bound to the discarded local
    ``UpperCAmelCase`` while the bodies read ``lr_scheduler`` / ``metrics`` /
    ``trainer`` / ``pl_module``. Restore the original hook names
    (``on_batch_end`` / ``on_validation_end`` / ``on_test_end``) and local
    bindings before use.
    """
    def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ):
        '''Log the current learning rate of every parameter group.'''
        UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler''']
        UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lowercase )
    def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ):
        '''Print validation metrics, skipping the log/progress_bar entries.'''
        rank_zero_info('''***** Validation results *****''' )
        UpperCAmelCase = trainer.callback_metrics
        # Log results
        for key in sorted(lowercase ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )
    def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ):
        '''Print test metrics and also write them to <output_dir>/test_results.txt.'''
        rank_zero_info('''***** Test results *****''' )
        UpperCAmelCase = trainer.callback_metrics
        # Log and save results to file
        UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(lowercase , '''w''' ) as writer:
            for key in sorted(lowercase ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )
def snake_case_ (_a : int , _a : Optional[Any] ):
    '''Add the generic, task-independent CLI arguments to the parser
    (output dir, fp16, TPU cores, grad clipping/accumulation, seed, data dir).

    NOTE(review): the signature repeats the parameter name ``_a`` (a
    SyntaxError) and the body reads ``parser`` while the many ``type=_a`` and
    ``Path(_a)`` references lost their original values (str/int/float types
    and the root directory) -- restore from the upstream script before use.
    '''
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=_a , default='''O2''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ):
    '''Generic training entry point: seed everything, build checkpoint and
    logging callbacks, construct a ``pl.Trainer`` and run ``fit`` when
    ``--do_train`` is set.

    NOTE(review): the signature repeats the parameter name ``_a`` (a
    SyntaxError), uses a mutable list as a default value, and the trainer
    kwargs built below are bound to the discarded local ``UpperCAmelCase``
    instead of a ``train_params`` dict -- restore from the upstream script
    before use.
    '''
    pl.seed_everything(args.seed )
    # init model
    UpperCAmelCase = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=_a )
    # add custom checkpoints
    if checkpoint_callback is None:
        UpperCAmelCase = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(_a )
    if logging_callback is None:
        UpperCAmelCase = LoggingCallback()
    UpperCAmelCase = {}
    if args.fpaa:
        UpperCAmelCase = 1_6
    if args.gpus > 1:
        UpperCAmelCase = '''auto'''
        UpperCAmelCase = '''ddp'''
    UpperCAmelCase = args.accumulate_grad_batches
    UpperCAmelCase = None
    UpperCAmelCase = '''auto'''
    UpperCAmelCase = pl.Trainer.from_argparse_args(
        _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , )
    if args.do_train:
        trainer.fit(_a )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
| 34
| 0
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
__UpperCamelCase = logging.getLogger(__name__)
__UpperCamelCase = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class UpperCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = """bertabs"""
def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=512, lowerCAmelCase__=6, lowerCAmelCase__=512, lowerCAmelCase__=8, lowerCAmelCase__=512, lowerCAmelCase__=0.2, lowerCAmelCase__=6, lowerCAmelCase__=768, lowerCAmelCase__=8, lowerCAmelCase__=2048, lowerCAmelCase__=0.2, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase__)
snake_case_ = vocab_size
snake_case_ = max_pos
snake_case_ = enc_layers
snake_case_ = enc_hidden_size
snake_case_ = enc_heads
snake_case_ = enc_ff_size
snake_case_ = enc_dropout
snake_case_ = dec_layers
snake_case_ = dec_hidden_size
snake_case_ = dec_heads
snake_case_ = dec_ff_size
snake_case_ = dec_dropout
| 69
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
UpperCAmelCase = cst_fwd.get(_a , np.inf )
UpperCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
UpperCAmelCase = new_cost_f
UpperCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ):
UpperCAmelCase = -1
UpperCAmelCase = set()
UpperCAmelCase = set()
UpperCAmelCase = {source: 0}
UpperCAmelCase = {destination: 0}
UpperCAmelCase = {source: None}
UpperCAmelCase = {destination: None}
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
UpperCAmelCase , UpperCAmelCase = queue_forward.get()
visited_forward.add(_a )
UpperCAmelCase , UpperCAmelCase = queue_backward.get()
visited_backward.add(_a )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
UpperCAmelCase = shortest_distance
return shortest_path_distance
# Forward adjacency lists: node -> list of [neighbour, edge_weight].
# NOTE(review): both graphs are bound to the same name ``A`` (the second
# rebinds the first); the originals were presumably distinct names such as
# ``graph_fwd`` / ``graph_bwd``.
A ={
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reversed-edge adjacency lists used by the backward half of the search.
A ={
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    # run the module's doctests when executed as a script
    import doctest
    doctest.testmod()
| 34
| 0
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = s.rsplit(_a , _a )
return new.join(_a )
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = {}
_lowerCAmelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_lowerCAmelCase = key.replace(F'{group_key}.' , F'{group_key}.group.' )
if "res_path" in key:
_lowerCAmelCase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
_lowerCAmelCase = rreplace(_a , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
_lowerCAmelCase = rreplace(_a , """.b""" , """.bias""" , 1 )
_lowerCAmelCase = value.float()
return upgrade
@torch.no_grad()
def _UpperCAmelCase ( snake_case , snake_case , snake_case=None , snake_case=True ):
    '''Convert a DALL-E encoder checkpoint into a ``FlavaImageCodebook`` and
    either save it with ``save_pretrained`` or return the converted
    state dict.

    NOTE(review): the signature repeats the parameter name ``snake_case``
    (a SyntaxError) and the body reads ``config_path`` / ``save_checkpoint``
    / ``encoder`` / ``hf_model`` etc. while results are bound to the
    discarded local ``_lowerCAmelCase`` -- restore the original parameter
    and local names (checkpoint_path, pytorch_dump_folder_path, config_path,
    save_checkpoint, ...) before use.
    '''
    from dall_e import Encoder
    _lowerCAmelCase = Encoder()
    # load the raw checkpoint from disk, or from a URL if the path is absent
    if os.path.exists(_a ):
        _lowerCAmelCase = torch.load(_a )
    else:
        _lowerCAmelCase = torch.hub.load_state_dict_from_url(_a )
    if isinstance(_a , _a ):
        _lowerCAmelCase = ckpt.state_dict()
    encoder.load_state_dict(_a )
    if config_path is not None:
        _lowerCAmelCase = FlavaImageCodebookConfig.from_pretrained(_a )
    else:
        _lowerCAmelCase = FlavaImageCodebookConfig()
    _lowerCAmelCase = FlavaImageCodebook(_a ).eval()
    _lowerCAmelCase = encoder.state_dict()
    # rename keys and load into the HF model
    _lowerCAmelCase = upgrade_state_dict(_a )
    hf_model.load_state_dict(_a )
    _lowerCAmelCase = hf_model.state_dict()
    # sanity check: parameter checksums of original and converted models match
    _lowerCAmelCase = count_parameters(_a )
    _lowerCAmelCase = count_parameters(_a )
    assert torch.allclose(_a , _a , atol=1E-3 )
    if save_checkpoint:
        hf_model.save_pretrained(_a )
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point for the DALL-E -> FLAVA codebook conversion script.
    # NOTE(review): the parser and the parsed args are bound to ``A__`` yet
    # the following lines read ``parser`` / ``args`` -- the original
    # ``parser = ...`` / ``args = ...`` bindings appear to have been lost.
    A__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    A__ = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 82
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
def snake_case_ (_a : List[str] ):
    '''Build an ``ASTConfig`` for the named checkpoint variant and attach the
    id2label / label2id mappings downloaded from the hub label files.

    NOTE(review): most results are bound to the discarded local
    ``UpperCAmelCase`` while later lines read ``config`` and ``idalabel`` --
    the config attribute assignments (e.g. the per-variant stride values for
    12-12/14-14/16-16, the speech-commands max length, and num_labels)
    appear to have been lost in a mechanical rewrite; restore them from the
    upstream conversion script before use.
    '''
    UpperCAmelCase = ASTConfig()
    # per-variant overrides (targets lost -- see NOTE above)
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        UpperCAmelCase = 1_2_8
    elif "12-12" in model_name:
        UpperCAmelCase = 1_2
        UpperCAmelCase = 1_2
    elif "14-14" in model_name:
        UpperCAmelCase = 1_4
        UpperCAmelCase = 1_4
    elif "16-16" in model_name:
        UpperCAmelCase = 1_6
        UpperCAmelCase = 1_6
    else:
        raise ValueError('''Model not supported''' )
    UpperCAmelCase = '''huggingface/label-files'''
    # choose the label file and label count per task
    if "speech-commands" in model_name:
        UpperCAmelCase = 3_5
        UpperCAmelCase = '''speech-commands-v2-id2label.json'''
    else:
        UpperCAmelCase = 5_2_7
        UpperCAmelCase = '''audioset-id2label.json'''
    UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
    UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()}
    UpperCAmelCase = idalabel
    UpperCAmelCase = {v: k for k, v in idalabel.items()}
    return config
def snake_case_ (name):
    """Map one original AST checkpoint parameter name to the HF naming scheme.

    Applies the rename rules in order (prefix, embeddings, transformer
    blocks, final layernorm, classifier head). Fix over the original: every
    ``name.replace(...)`` result was bound to a discarded local, so the
    function returned its input unchanged; each replacement now re-binds
    ``name`` as the following checks expect.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    # "attn.proj" must be handled before the plain "attn" rule
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def snake_case_ (_a : Dict , _a : List[Any] ):
    '''Split fused qkv weights/biases from the original checkpoint into the
    separate query/key/value entries expected by the HF model; other keys
    are carried over (renamed) unchanged.

    NOTE(review): the signature repeats the parameter name ``_a`` (a
    SyntaxError) while the body reads ``orig_state_dict`` / ``config``, and
    the per-slice assignments below target the discarded local
    ``UpperCAmelCase`` -- the destination state-dict keys for the q/k/v
    slices (and the renamed fall-through key) have been lost in a
    mechanical rewrite; restore from the upstream conversion script.
    '''
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase = orig_state_dict.pop(_a )
        if "qkv" in key:
            UpperCAmelCase = key.split('''.''' )
            UpperCAmelCase = int(key_split[3] )
            UpperCAmelCase = config.hidden_size
            if "weight" in key:
                UpperCAmelCase = val[:dim, :]  # query weight slice
                UpperCAmelCase = val[dim : dim * 2, :]  # key weight slice
                UpperCAmelCase = val[-dim:, :]  # value weight slice
            else:
                UpperCAmelCase = val[:dim]  # query bias slice
                UpperCAmelCase = val[dim : dim * 2]  # key bias slice
                UpperCAmelCase = val[-dim:]  # value bias slice
        else:
            UpperCAmelCase = val
    return orig_state_dict
def snake_case_ (state_dict):
    """Remove, in place, the original classification-head weights that have
    no counterpart in the HF model.

    Fixes over the original: the ignore list was bound to a discarded local
    (so the loop read an undefined name), and ``state_dict.pop(_a, _a)``
    referenced the wrong names -- each listed key is popped with a ``None``
    default so missing keys are tolerated.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ):
    """Convert an original AST checkpoint to HF format, verify the logits on a
    sample input, and optionally save/push the result."""
    # NOTE(review): the three parameters all share the name `_a` — a
    # duplicate-argument SyntaxError.  Originally these were
    # (model_name, pytorch_dump_folder_path, push_to_hub).  Likewise every
    # `UpperCAmelCase = ...` below rebinds one placeholder while later lines
    # read the intended names (model_name_to_url, state_dict, model, waveform,
    # logits, ...); those bindings were destroyed by obfuscation and must be
    # restored before this function can run.
    UpperCAmelCase = get_audio_spectrogram_transformer_config(_a )
    # mapping from supported model names to the original checkpoint URLs
    UpperCAmelCase = {
        '''ast-finetuned-audioset-10-10-0.4593''': (
            '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.450''': (
            '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448''': (
            '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448-v2''': (
            '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-12-12-0.447''': (
            '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-14-14-0.443''': (
            '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-16-16-0.442''': (
            '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
        ),
        '''ast-finetuned-speech-commands-v2''': (
            '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
        ),
    }
    # load original state_dict
    UpperCAmelCase = model_name_to_url[model_name]
    UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' )
    # remove some keys
    remove_keys(_a )
    # rename some keys
    UpperCAmelCase = convert_state_dict(_a , _a )
    # load 🤗 model
    UpperCAmelCase = ASTForAudioClassification(_a )
    model.eval()
    model.load_state_dict(_a )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    # normalization stats and max input length differ for the speech-commands checkpoint
    UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
    UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
    UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
    UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a )
    if "speech-commands" in model_name:
        # use a validation sample from the speech_commands dataset
        UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        UpperCAmelCase = dataset[0]['''audio''']['''array''']
    else:
        # otherwise download a reference audio clip from the hub
        UpperCAmelCase = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a )
        UpperCAmelCase = waveform.squeeze().numpy()
    UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
    # forward pass
    UpperCAmelCase = model(**_a )
    UpperCAmelCase = outputs.logits
    # expected first three logits for each supported checkpoint
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ):
        raise ValueError('''Logits don\'t match''' )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(_a ).mkdir(exist_ok=_a )
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_a )
        print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(_a )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(F"MIT/{model_name}" )
        feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
    # Build the CLI for the conversion script.  The obfuscated version bound
    # the parser and parsed args to `A` while later lines read `parser`/`args`
    # (NameError), and called a function name that is not defined in this file;
    # the conversion entry point above is bound to `snake_case_`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    snake_case_(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A__ ( IterableDataset):
    """Iterable dataset that yields 0, 1, 2, ... and stops at a random point.

    After each yielded item the stream stops with probability ``p_stop``; it
    always stops once ``max_length`` items have been produced.  Useful for
    exercising sharding utilities with unpredictable stream lengths.
    """

    def __init__( self , p_stop=0.01 , max_length=10_00 ):
        # probability of stopping after any given item
        self.p_stop = p_stop
        # hard upper bound on the number of items yielded
        self.max_length = max_length

    def __iter__( self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # draw after yielding, so at least one item is produced
            # (unless max_length == 0)
            stop = random.random() < self.p_stop
class A__ ( unittest.TestCase):
    """Tests for BatchSamplerShard / IterableDatasetShard / skip utilities."""

    # NOTE(review): obfuscation damage throughout this class:
    #  * every test method is named `__lowerCamelCase`, so each def shadows the
    #    previous one and only the last method survives on the class;
    #  * helper signatures repeat the parameter name `_SCREAMING_SNAKE_CASE`
    #    (duplicate argument -> SyntaxError);
    #  * locals are bound to `__lowerCAmelCase` while later statements read the
    #    intended names (batch_sampler_shards, expected, dataloader, ...), and
    #    the original drop_last / split_batches True/False literals were all
    #    replaced by the same placeholder.
    # All of these must be restored before the tests can run.
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ):
        # Materialize one BatchSamplerShard per simulated process (2) and
        # compare the resulting batches against the expected shards.
        __lowerCAmelCase : int = [
            BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
            for i in range(2 )
        ]
        __lowerCAmelCase : Dict = [list(_SCREAMING_SNAKE_CASE ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(_SCREAMING_SNAKE_CASE ) for shard in batch_sampler_shards] , [len(_SCREAMING_SNAKE_CASE ) for e in expected] )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # Sharding behaviour of a plain BatchSampler (batch_size=3).
        __lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        __lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        __lowerCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        __lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is very small.
        __lowerCAmelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = [[], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # Same scenarios with split_batches=True (batch_size=4, split across 2).
        __lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size.
        __lowerCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        __lowerCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is very small.
        __lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = [[], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # Same scenarios with even_batches=False (no wrap-around padding).
        __lowerCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        __lowerCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        __lowerCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        __lowerCAmelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Any = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is very small.
        __lowerCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = [[[0, 1]], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Any = [[], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # split_batches=True combined with even_batches=False.
        __lowerCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size.
        __lowerCAmelCase : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        __lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        # Check the shards when the dataset is very small.
        __lowerCAmelCase : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = [[[0, 1]], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = BatchSampler(range(2 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = [[], []]
        self.check_batch_sampler_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # Sharding a hand-written batch sampler with uneven batch sizes.
        __lowerCAmelCase : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        __lowerCAmelCase : str = [BatchSamplerShard(_SCREAMING_SNAKE_CASE , 2 , _SCREAMING_SNAKE_CASE , even_batches=_SCREAMING_SNAKE_CASE ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False ):
        # Compare IterableDatasetShard output across processes against the
        # reference stream produced with the same RNG seed.
        random.seed(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : str = list(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = [
            IterableDatasetShard(
                _SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=_SCREAMING_SNAKE_CASE , num_processes=_SCREAMING_SNAKE_CASE , process_index=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE , )
            for i in range(_SCREAMING_SNAKE_CASE )
        ]
        __lowerCAmelCase : List[str] = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(_SCREAMING_SNAKE_CASE )
            iterable_dataset_lists.append(list(_SCREAMING_SNAKE_CASE ) )
        __lowerCAmelCase : str = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        __lowerCAmelCase : List[str] = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
            self.assertTrue(len(_SCREAMING_SNAKE_CASE ) % shard_batch_size == 0 )
        __lowerCAmelCase : Optional[int] = []
        for idx in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(_SCREAMING_SNAKE_CASE ) < len(_SCREAMING_SNAKE_CASE ):
                reference += reference
        self.assertListEqual(_SCREAMING_SNAKE_CASE , reference[: len(_SCREAMING_SNAKE_CASE )] )
    def __lowerCamelCase ( self ):
        # Exercise IterableDatasetShard over all drop_last/split_batches combos.
        __lowerCAmelCase : List[Any] = 42
        __lowerCAmelCase : Dict = RandomIterableDataset()
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        # Edge case with a very small dataset
        __lowerCAmelCase : Tuple = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
        self.check_iterable_dataset_shards(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self ):
        # SkipBatchSampler should drop the first `skip_batches` batches.
        __lowerCAmelCase : Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = SkipBatchSampler(_SCREAMING_SNAKE_CASE , 2 )
        self.assertListEqual(list(_SCREAMING_SNAKE_CASE ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def __lowerCamelCase ( self ):
        # SkipDataLoader should drop the first `skip_batches` batches.
        __lowerCAmelCase : Any = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def __lowerCamelCase ( self ):
        # skip_first_batches wraps an existing DataLoader.
        __lowerCAmelCase : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
        __lowerCAmelCase : int = skip_first_batches(_SCREAMING_SNAKE_CASE , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def __lowerCamelCase ( self ):
        # DataLoaderShard flags the last batch via end_of_dataloader.
        __lowerCAmelCase : Optional[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def __lowerCamelCase ( self ):
        # DataLoaderDispatcher needs an Accelerator to be initialized first.
        Accelerator()
        __lowerCAmelCase : Any = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_SCREAMING_SNAKE_CASE ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 86
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ (v: list , l: int , r: int , key ) -> int:  # noqa: E741
    """Binary-search the smallest index in ``(l, r]`` with ``v[index] >= key``.

    ``v[l + 1 : r + 1]`` must be sorted in non-decreasing order and
    ``v[r] >= key`` must hold for the result to be meaningful.
    (The obfuscated version declared four parameters all named ``_a``, which
    is a SyntaxError; the names are restored here.)
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            # v[m] is a valid candidate: shrink the upper bound
            r = m
        else:
            l = m  # noqa: E741
    return r
def snake_case_ (_a : list[int] ):
    """Return the length of the longest strictly increasing subsequence of ``_a``.

    O(n log n) patience-sorting variant: ``tail[k]`` holds the smallest
    possible tail value of an increasing subsequence of length ``k + 1``.
    (The obfuscated version lost the ``tail``/``length`` bindings and the
    ceil-index replacement, raising NameError; they are restored here.)
    """
    from bisect import bisect_left  # local import keeps the module surface unchanged

    if len(_a ) == 0:
        return 0
    tail = [0] * len(_a )
    length = 1
    tail[0] = _a[0]
    for i in range(1 , len(_a ) ):
        if _a[i] < tail[0]:
            # new smallest value: better start for a length-1 subsequence
            tail[0] = _a[i]
        elif _a[i] > tail[length - 1]:
            # strictly extends the longest subsequence found so far
            tail[length] = _a[i]
            length += 1
        else:
            # replace the first tail >= _a[i] (ceil index) to keep tails minimal
            tail[bisect_left(tail , _a[i] , 0 , length )] = _a[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 34
| 0
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( __a):
    """Processor wrapping a BLIP image processor and a BERT tokenizer."""

    # NOTE(review): the base class `__a` is not defined in this file — judging
    # by the imports above it should presumably be ProcessorMixin (confirm).
    # The three assignments below all rebind the same class attribute
    # `SCREAMING_SNAKE_CASE__`; only the last survives, so the first two
    # (originally distinct ProcessorMixin attributes) are lost.
    SCREAMING_SNAKE_CASE__ = ["""image_processor""", """tokenizer"""]
    SCREAMING_SNAKE_CASE__ = """BlipImageProcessor"""
    SCREAMING_SNAKE_CASE__ = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__(self , UpperCAmelCase , UpperCAmelCase ) -> Any:
        # NOTE(review): duplicate parameter names `UpperCAmelCase` — a
        # SyntaxError.  The two `_lowercase =` lines rebind one throwaway name;
        # the second one looks like it was `self.current_processor = ...` —
        # TODO confirm against the upstream file.
        _lowercase =False
        super().__init__(UpperCAmelCase , UpperCAmelCase )
        _lowercase =self.image_processor
    def __call__(self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> Optional[int]:
        # NOTE(review): all positional parameters share the name `UpperCAmelCase`
        # (SyntaxError); the body reads the intended names (images, text, ...).
        # Tokenize-only path when no images are given; otherwise run the image
        # processor and merge the text encoding into its output.
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            _lowercase =self.tokenizer
            _lowercase =self.tokenizer(
                text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
            return text_encoding
        # add pixel_values
        _lowercase =self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
        if text is not None:
            _lowercase =self.tokenizer(
                text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
        else:
            _lowercase =None
        if text_encoding is not None:
            encoding_image_processor.update(UpperCAmelCase )
        return encoding_image_processor
    def __A (self , *UpperCAmelCase , **UpperCAmelCase ) -> int:
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
    def __A (self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
        # Forwarded to the tokenizer's decode.  NOTE(review): shadows the
        # batch_decode wrapper above, since both defs share the name `__A`.
        return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
    @property
    def __A (self ) -> str:
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order via dict.fromkeys.
        _lowercase =self.tokenizer.model_input_names
        _lowercase =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 5
|
'''simple docstring'''
def snake_case_ (input_string: str , pattern: str ) -> bool:
    """Regex-style matching of ``input_string`` against ``pattern``.

    The pattern supports ``.`` (any single character) and ``*`` (zero or more
    of the preceding element) and must match the whole string.  Bottom-up DP:
    ``dp[i][j]`` is 1 iff the first ``i`` characters of the string match the
    first ``j`` characters of the pattern.
    (The obfuscated version declared both parameters as ``_a`` — a SyntaxError
    — and lost the ``len_string``/``len_pattern``/``dp`` bindings.)
    """
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # The obfuscated version bound both demo values to `A` while the calls
    # below read `input_string`/`pattern`, and invoked `match_pattern`, which
    # is not defined in this file — the matcher above is bound to
    # `snake_case_`.  Both NameErrors are fixed here.
    input_string = 'aab'
    pattern = 'c*a*b'
    # using function to check whether given string matches the given pattern
    if snake_case_(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34
| 0
|
from ...processing_utils import ProcessorMixin
class _lowercase ( __a):
"""simple docstring"""
A__ = """SpeechT5FeatureExtractor"""
A__ = """SpeechT5Tokenizer"""
def __init__( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
'''simple docstring'''
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = kwargs.pop("audio" , __lowerCamelCase )
lowerCamelCase__ : List[str] = kwargs.pop("text" , __lowerCamelCase )
lowerCamelCase__ : Dict = kwargs.pop("text_target" , __lowerCamelCase )
lowerCamelCase__ : List[str] = kwargs.pop("audio_target" , __lowerCamelCase )
lowerCamelCase__ : List[str] = kwargs.pop("sampling_rate" , __lowerCamelCase )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
lowerCamelCase__ : List[str] = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
elif text is not None:
lowerCamelCase__ : Tuple = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
else:
lowerCamelCase__ : str = None
if audio_target is not None:
lowerCamelCase__ : Optional[int] = self.feature_extractor(audio_target=__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : str = targets["input_values"]
elif text_target is not None:
lowerCamelCase__ : Optional[Any] = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : Optional[int] = targets["input_ids"]
else:
lowerCamelCase__ : Tuple = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase__ : Union[str, Any] = labels
lowerCamelCase__ : int = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCamelCase__ : Any = decoder_attention_mask
return inputs
def lowerCAmelCase(self, *args, **kwargs):
    """Pad already-processed features and/or labels to a uniform length.

    Accepts ``input_values`` (audio features), ``input_ids`` (token ids) and
    ``labels`` keyword arguments; pads each with the matching backend and, when
    labels are present, attaches them as ``labels``/``decoder_attention_mask``.

    Raises:
        ValueError: if both `input_values` and `input_ids` are given, or none of
            the three inputs is given.
    """
    # Fix: the original signature duplicated one name for *args and **kwargs
    # (a SyntaxError) and the popped defaults/locals were mangled.
    input_values = kwargs.pop("input_values", None)
    input_ids = kwargs.pop("input_ids", None)
    labels = kwargs.pop("labels", None)

    if input_values is not None and input_ids is not None:
        raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
    if input_values is None and input_ids is None and labels is None:
        raise ValueError(
            "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

    if input_values is not None:
        inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
    elif input_ids is not None:
        inputs = self.tokenizer.pad(input_ids, **kwargs)
    else:
        inputs = None

    if labels is not None:
        if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
            # Token-id labels: pad with the tokenizer.
            targets = self.tokenizer.pad(labels, **kwargs)
            labels = targets["input_ids"]
        else:
            # Spectrogram labels: temporarily swap the extractor's feature size
            # to the mel-bin count so padding uses the label dimensionality.
            feature_size_hack = self.feature_extractor.feature_size
            self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
            targets = self.feature_extractor.pad(labels, *args, **kwargs)
            self.feature_extractor.feature_size = feature_size_hack
            labels = targets["input_values"]
    else:
        targets = None

    # Labels-only call: return the padded targets directly.
    if inputs is None:
        return targets

    if targets is not None:
        inputs["labels"] = labels
        decoder_attention_mask = targets.get("attention_mask")
        if decoder_attention_mask is not None:
            inputs["decoder_attention_mask"] = decoder_attention_mask

    return inputs
def lowerCAmelCase(self, *args, **kwargs):
    """Forward all arguments to the tokenizer's ``batch_decode``."""
    # Fix: the original duplicated one parameter name for *args and **kwargs,
    # which is a SyntaxError.
    return self.tokenizer.batch_decode(*args, **kwargs)
def lowerCAmelCase(self, *args, **kwargs):
    """Forward all arguments to the tokenizer's ``decode``."""
    # Fix: the original duplicated one parameter name for *args and **kwargs,
    # which is a SyntaxError.
    return self.tokenizer.decode(*args, **kwargs)
| 184
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors=` in the tests below:
# prefer PyTorch, then TensorFlow, falling back to JAX.
# Fix: the assignments under each branch had lost their indentation (SyntaxError).
# NOTE(review): the chosen value is bound to the mangled module name `A`.
if is_torch_available():
    A = 'pt'
elif is_tf_available():
    A = 'tf'
else:
    A = 'jax'
# NOTE(review): the base `__a` is not defined at module scope in this file; it
# presumably stands for the imported TokenizerTesterMixin — confirm.
class _a ( __a , unittest.TestCase ):
    """Test suite for the byte-level Perceiver tokenizer.

    NOTE(review): local names throughout this class were mangled to
    `UpperCAmelCase`/`lowercase`, so many references (`tokenizer`, `toks`,
    `encoded`, ...) point at names that are never bound and would raise
    NameError at runtime. Code is left token-identical; fixing it requires
    restoring the original local names.
    """

    # Tokenizer class under test, and the "has a rust tokenizer" flag.
    __a : Optional[Any] = PerceiverTokenizer
    __a : str = False

    def A ( self : Union[str, Any] ):
        """Create a PerceiverTokenizer and save it under the mixin's temp dir."""
        super().setUp()
        UpperCAmelCase = PerceiverTokenizer()
        # NOTE(review): `tokenizer` is undefined — the instance above was bound
        # to the mangled name `UpperCAmelCase`.
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def A ( self : Optional[int] ):
        """Pretrained Perceiver tokenizer used by the integration tests below."""
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )

    def A ( self : Union[str, Any] , **lowercase : int ):
        """Reload the tokenizer saved by setUp, forwarding any extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )

    # NOTE(review): this signature repeats the parameter name `lowercase` four
    # times, which is a SyntaxError; the mangling hides the original
    # (tokenizer, with_prefix_space=False, max_length=20, min_length=5) names.
    def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ):
        """Build a clean (decodable, ASCII) text/ids pair from single-byte tokens."""
        UpperCAmelCase = []
        for i in range(len(lowercase ) ):
            try:
                UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only tokens that are plain letters/spaces and round-trip 1:1.
        UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) )
        UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
        if max_length is not None and len(lowercase ) > max_length:
            UpperCAmelCase = toks[:max_length]
        if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
            while len(lowercase ) < min_length:
                UpperCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
        if " " not in output_txt and len(lowercase ) > 1:
            UpperCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
            )
        if with_prefix_space:
            UpperCAmelCase = ''' ''' + output_txt
        UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
        return output_txt, output_ids

    def A ( self : Optional[int] ):
        """Round-trip strings containing multi-byte characters (€, accented e)."""
        UpperCAmelCase = self.perceiver_tokenizer
        UpperCAmelCase = '''Unicode €.'''
        UpperCAmelCase = tokenizer(lowercase )
        UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['''input_ids'''] , lowercase )
        # decoding
        UpperCAmelCase = tokenizer.decode(lowercase )
        self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' )
        UpperCAmelCase = tokenizer('''e è é ê ë''' )
        UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['''input_ids'''] , lowercase )
        # decoding
        UpperCAmelCase = tokenizer.decode(lowercase )
        self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )

    def A ( self : str ):
        """Batch-encode two sentences and check padded ids and tensor shapes."""
        UpperCAmelCase = self.perceiver_tokenizer
        UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
        self.assertIsInstance(lowercase , lowercase )
        if FRAMEWORK != "jax":
            UpperCAmelCase = list(batch.input_ids.numpy()[0] )
        else:
            UpperCAmelCase = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(lowercase , lowercase )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def A ( self : str ):
        """Encoding without targets must not produce decoder_* keys."""
        UpperCAmelCase = self.perceiver_tokenizer
        UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , lowercase )
        self.assertIn('''attention_mask''' , lowercase )
        self.assertNotIn('''decoder_input_ids''' , lowercase )
        self.assertNotIn('''decoder_attention_mask''' , lowercase )

    def A ( self : Dict ):
        """Target texts are padded/truncated to the requested max_length."""
        UpperCAmelCase = self.perceiver_tokenizer
        UpperCAmelCase = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        UpperCAmelCase = tokenizer(
            text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    def A ( self : int ):
        """Save/reload round-trips preserve encodings and added special tokens."""
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase = tempfile.mkdtemp()
                UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
                UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                tokenizer.save_pretrained(lowercase )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
                UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
                self.assertListEqual(lowercase , lowercase )
                shutil.rmtree(lowercase )
        UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase = tempfile.mkdtemp()
                UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                UpperCAmelCase = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                tokenizer.save_pretrained(lowercase )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
                UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
                self.assertListEqual(lowercase , lowercase )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(lowercase )

    def A ( self : Optional[int] ):
        """additional_special_tokens from the saved config files are honored and
        can be overridden via from_pretrained(additional_special_tokens=...)."""
        UpperCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(lowercase )
                with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    UpperCAmelCase = json.load(lowercase )
                with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    UpperCAmelCase = json.load(lowercase )
                UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )]
                UpperCAmelCase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                UpperCAmelCase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(lowercase , lowercase )
                with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(lowercase , lowercase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase = tokenizer_class.from_pretrained(
                    lowercase , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )]
                UpperCAmelCase = tokenizer_class.from_pretrained(
                    lowercase , additional_special_tokens=lowercase , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )

    def A ( self : Optional[int] ):
        """An undecodable byte id decodes to the Unicode replacement character."""
        UpperCAmelCase = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '''�''' )

    def A ( self : Union[str, Any] ):
        """Intentionally skipped for the byte-level tokenizer."""
        pass

    def A ( self : Any ):
        """Intentionally skipped for the byte-level tokenizer."""
        pass

    def A ( self : Dict ):
        """Intentionally skipped for the byte-level tokenizer."""
        pass

    def A ( self : str ):
        """Intentionally skipped for the byte-level tokenizer."""
        pass

    def A ( self : List[str] ):
        """convert_tokens_to_string handles special tokens and returns a str."""
        UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase )
                self.assertIsInstance(lowercase , lowercase )
| 34
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __A(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch model file.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the PyTorch ``state_dict``.
    """
    # Fix: the original signature repeated one parameter name three times
    # (a SyntaxError) and the body referenced the undefined name `_a`.
    config = LxmertConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser/args were bound to the mangled name `_a` while the code
    # read undefined `parser`/`args`, and the call target did not exist.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point in this file is named `__A`.
    __A(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 39
|
'''simple docstring'''
import os
from distutils.util import strtobool
def snake_case_(env_keys, default):
    """Return the first non-negative integer found among the env vars in
    `env_keys`; fall back to `default` when none is set to a value >= 0.

    Args:
        env_keys: iterable of environment variable names, checked in order.
        default: value returned when no variable yields a non-negative int.
    """
    # Fix: the original signature repeated `_a` for both parameters (a
    # SyntaxError) and the body read the undefined names `env_keys`/`default`.
    for e in env_keys:
        # Missing variables map to -1, which the >= 0 guard skips.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def snake_case_(key, default=False):
    """Return True/False according to the env var `key` parsed as a boolean
    string (strtobool semantics); `default` is used when the var is unset.

    Raises:
        ValueError: if the value is not a recognized truth string.
    """
    # Fix: the original signature repeated `_a` for both parameters (a
    # SyntaxError). The strtobool parse is inlined because distutils was
    # removed in Python 3.12; semantics are identical (case-insensitive).
    value = os.environ.get(key, str(default)).lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError("invalid truth value %r" % (value,))
def snake_case_(key, default="no"):
    """Return the env var `key` as a string, or str(`default`) when unset."""
    # Fix: the original signature repeated `_a` for both parameters (a
    # SyntaxError) and the body read the undefined names `key`/`default`.
    value = os.environ.get(key, str(default))
    return value
| 34
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase(Trainer):
    """Trainer specialized for extractive QA: metric computation is deferred to a
    post-processing step that converts raw model predictions into answer spans.

    Fixes applied: every method signature repeated one mangled parameter name
    (a SyntaxError); the base class `__a` was undefined and is restored to the
    imported `Trainer`.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    # NOTE(review): both methods below share the mangled name `a__`, so the
    # second definition shadows the first in the class namespace; the names are
    # kept to preserve the (broken) external interface.
    def a__(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """Evaluate, post-process predictions into answers, and log metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric function, even if the loop raises.
            compute_metrics_restored = compute_metrics
            self.compute_metrics = compute_metrics_restored
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def a__(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        """Predict on a test set and return a PredictionOutput with QA metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 26
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# NOTE(review): the three module-level constants below are all bound to the
# mangled name `A` (logger, sample text, then the namedtuple) — each
# assignment shadows the previous one.
A =logging.getLogger(__name__)
# Sample sentence used to sanity-check converted vs. original model outputs.
A ='Hello world! cécé herlolip'
# Hyper-parameter container mirroring the authors' BertAbs configuration.
A =namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def snake_case_(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint into a transformers-style
    ``BertAbsSummarizer`` state dict, verifying outputs match along the way.

    Args:
        path_to_checkpoints: path to the original BertAbs ``torch.save`` dump.
        dump_path: unused here; the state dict is written to a fixed path below.

    Raises:
        ValueError: if the converted model's outputs diverge beyond 1e-3.

    Fixes applied: the original signature repeated `_a` for both parameters and
    the ``torch.load`` lambda repeated its argument name (both SyntaxErrors);
    collapsed multi-assignments are restored with meaningful local names.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs, padded to the 512-token maximum position
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1E-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    # Fix: the parser/args were bound to the mangled name `A` while the code
    # read undefined `parser`/`args`, and the call target did not exist.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point in this file is named `snake_case_`.
    snake_case_(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 34
| 0
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
# Module logger for this conversion script.
lowerCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
# Sample sentence used to sanity-check converted vs. original model outputs.
lowerCAmelCase_ : Union[str, Any] = 'Hello world! cécé herlolip'
# Hyper-parameter container mirroring the authors' BertAbs configuration.
lowerCAmelCase_ : Any = namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def _lowerCamelCase(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint into a transformers-style
    ``BertAbsSummarizer`` state dict, verifying outputs match along the way.

    Args:
        path_to_checkpoints: path to the original BertAbs ``torch.save`` dump.
        dump_path: unused here; the state dict is written to a fixed path below.

    Raises:
        ValueError: if the converted model's outputs diverge beyond 1e-3.

    Fixes applied: the original signature repeated `lowercase` for both
    parameters and the ``torch.load`` lambda repeated its argument name (both
    SyntaxErrors); collapsed multi-assignments are restored.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs, padded to the 512-token maximum position
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1E-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    # Fix: the parser/args were bound to the mangled name `lowerCAmelCase_`
    # while the code read undefined `parser`/`args`, and the call target
    # did not exist.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point in this file is `_lowerCamelCase`.
    _lowerCamelCase(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 63
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a(metaclass=DummyObject):
    """Placeholder that raises an informative error when the `flax` and
    `transformers` backends are unavailable.

    Fixes applied: every method signature duplicated one name for *args and
    **kwargs (a SyntaxError), and the metaclass `__a` was undefined — restored
    to the imported `DummyObject`.
    """

    __a : int = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Placeholder that raises an informative error when the `flax` and
    `transformers` backends are unavailable.

    Fixes applied: duplicate *args/**kwargs parameter names (SyntaxError) and
    the undefined metaclass `__a` restored to the imported `DummyObject`.
    """

    __a : int = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Placeholder that raises an informative error when the `flax` and
    `transformers` backends are unavailable.

    Fixes applied: duplicate *args/**kwargs parameter names (SyntaxError) and
    the undefined metaclass `__a` restored to the imported `DummyObject`.
    """

    __a : int = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    """Placeholder that raises an informative error when the `flax` and
    `transformers` backends are unavailable.

    Fixes applied: duplicate *args/**kwargs parameter names (SyntaxError) and
    the undefined metaclass `__a` restored to the imported `DummyObject`.
    """

    __a : Any = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 34
| 0
|
"""simple docstring"""
def A_(a, b):
    """Return the greatest common divisor of `a` and `b` (Euclid's algorithm)."""
    # Fix: the original signature repeated `_lowercase` for both parameters
    # (a SyntaxError) while the body read the undefined names `a`/`b`.
    while a != 0:
        a, b = b % a, a
    return b
def A_(a, m):
    """Return the modular multiplicative inverse of `a` modulo `m`.

    Raises:
        ValueError: if gcd(a, m) != 1 (no inverse exists).
    """
    # Fix: the original signature repeated `_lowercase` for both parameters
    # (a SyntaxError) and it called an undefined `gcd`; Euclid's algorithm is
    # inlined here so the function stands on its own.
    x, y = a, m
    while x != 0:
        x, y = y % x, x
    if y != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    # Extended Euclid: invariants u3 = u1*a + u2*m and v3 = v1*a + v2*m.
    ua, va, wa = 1, 0, a
    ub, vb, wb = 0, 1, m
    while wb != 0:
        q = wa // wb
        ub, vb, wb, ua, va, wa = (ua - q * ub), (va - q * vb), (wa - q * wb), ub, vb, wb
    return ua % m
| 66
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
# Map from model-size suffix to the corresponding Keras EfficientNet class.
# Fix: every entry was mangled to `EfficientNetBa`; restored to B0..B7.
# NOTE(review): this dict shadows the logger bound to `A` one line above —
# the mangled file collapsed distinct module-level names onto `A`.
A = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
# Per-size EfficientNet hyper-parameters (hidden dim, width/depth multipliers,
# input image size, dropout rate, depthwise-conv padding indices).
# NOTE(review): bound to the mangled name `A`, shadowing the previous binding;
# the function below reads it as `CONFIG_MAP`, which is never defined here.
A ={
    'b0': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.0,
        'image_size': 2_24,
        'dropout_rate': 0.2,
        'dw_padding': [],
    },
    'b1': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.1,
        'image_size': 2_40,
        'dropout_rate': 0.2,
        'dw_padding': [16],
    },
    'b2': {
        'hidden_dim': 14_08,
        'width_coef': 1.1,
        'depth_coef': 1.2,
        'image_size': 2_60,
        'dropout_rate': 0.3,
        'dw_padding': [5, 8, 16],
    },
    'b3': {
        'hidden_dim': 15_36,
        'width_coef': 1.2,
        'depth_coef': 1.4,
        'image_size': 3_00,
        'dropout_rate': 0.3,
        'dw_padding': [5, 18],
    },
    'b4': {
        'hidden_dim': 17_92,
        'width_coef': 1.4,
        'depth_coef': 1.8,
        'image_size': 3_80,
        'dropout_rate': 0.4,
        'dw_padding': [6],
    },
    'b5': {
        'hidden_dim': 20_48,
        'width_coef': 1.6,
        'depth_coef': 2.2,
        'image_size': 4_56,
        'dropout_rate': 0.4,
        'dw_padding': [13, 27],
    },
    'b6': {
        'hidden_dim': 23_04,
        'width_coef': 1.8,
        'depth_coef': 2.6,
        'image_size': 5_28,
        'dropout_rate': 0.5,
        'dw_padding': [31],
    },
    'b7': {
        'hidden_dim': 25_60,
        'width_coef': 2.0,
        'depth_coef': 3.1,
        'image_size': 6_00,
        'dropout_rate': 0.5,
        'dw_padding': [18],
    },
}
def get_efficientnet_config(model_name):
    """Build an ``EfficientNetConfig`` for the given variant name (e.g. ``"b0"``).

    Architecture hyperparameters are looked up in ``CONFIG_MAP``; the
    ImageNet-1k label mapping is downloaded from the Hugging Face Hub.
    Renamed from a placeholder name to match its call site.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Fix: convert each individual key with int(k) (the original converted the
    # whole mapping argument, which is a TypeError at runtime).
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download and return the COCO val2017 image used as a conversion sanity check."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the raw response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """Create an ``EfficientNetImageProcessor`` matching the variant's input resolution.

    Center-cropping is disabled because the model is evaluated at the exact
    per-variant image size.
    """
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Build a mapping from TF/Keras EfficientNet parameter names to HF state-dict keys.

    Only names present in ``original_param_names`` are mapped (prefixed with
    ``efficientnet.``); the classifier head keys (``predictions/...``) are
    always added. Fix: the original dropped the dict-subscript assignment
    targets, so the mapping was never populated.
    """
    # Block identifiers such as "1a", "2b", ... extracted from parameter names.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    # Map each TF block id to a 0-based HF block index.
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    tf_to_hf_renames = []
    tf_to_hf_renames.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    tf_to_hf_renames.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    tf_to_hf_renames.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    tf_to_hf_renames.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    tf_to_hf_renames.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
    for b in block_names:
        hf_b = block_name_mapping[b]
        tf_to_hf_renames.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        tf_to_hf_renames.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        tf_to_hf_renames.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        tf_to_hf_renames.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        tf_to_hf_renames.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        tf_to_hf_renames.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        tf_to_hf_renames.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        tf_to_hf_renames.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        tf_to_hf_renames.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        tf_to_hf_renames.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
        tf_to_hf_renames.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        tf_to_hf_renames.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        tf_to_hf_renames.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        tf_to_hf_renames.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        tf_to_hf_renames.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        tf_to_hf_renames.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        tf_to_hf_renames.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        tf_to_hf_renames.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        tf_to_hf_renames.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))
    tf_to_hf_renames.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    tf_to_hf_renames.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    tf_to_hf_renames.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    tf_to_hf_renames.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    tf_to_hf_renames.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for tf_name, hf_name in tf_to_hf_renames:
        if tf_name in original_param_names:
            key_mapping[tf_name] = "efficientnet." + hf_name
    # Classifier head lives outside the `efficientnet.` backbone prefix.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF/Keras parameter values into the HF state dict in place.

    Conv kernels are permuted with ``permute(3, 2, 0, 1)`` and depthwise
    kernels with ``permute(2, 3, 0, 1)`` to match PyTorch's channel-first
    layout; dense kernels are transposed. Entries whose name contains
    "normalization" are skipped.
    """
    for key, value in tf_params.items():
        # Keras preprocessing/normalization layers have no HF counterpart.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters.
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a Keras EfficientNet checkpoint to a HF EfficientNet model.

    Loads the pretrained Keras model, copies its weights into a freshly
    configured ``EfficientNetForImageClassification``, verifies that both
    models produce matching logits on a sample image, then optionally saves
    and/or pushes the converted model and its image processor.
    """
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to a throwaway name while the
    # code below references `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCamelCase : str = logging.get_logger(__name__)
class A__(VideoMAEImageProcessor):
    """Deprecated feature-extractor alias for ``VideoMAEImageProcessor``.

    Fixes: the original signature used the same name for ``*args`` and
    ``**kwargs`` (a SyntaxError), inherited from an unresolved placeholder
    base, and dropped the warning category.
    """

    def __init__(self, *args, **kwargs):
        # Emit a deprecation warning, then defer entirely to the image processor.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 52
|
"""Download the Open Graph preview image of a web page given its URL."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: the package is "bs4", not "bsa"

if __name__ == "__main__":
    # Fix: values were bound to one shadowed name while referenced as
    # url/soup/image_url/image_data/file_name below.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 34
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_UpperCAmelCase = logging.get_logger(__name__)
class a(BaseImageProcessor):
    """Image processor: shortest-edge resize, center crop, rescale and normalize,
    plus post-processing of semantic-segmentation logits.

    Fixes: every signature repeated the same parameter name (a SyntaxError)
    while the bodies referenced the real names, and all methods shared one
    placeholder name so they shadowed each other.
    NOTE(review): the original base class name was unresolved in this chunk;
    ``BaseImageProcessor`` (imported above) is the presumed base — confirm.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transformation pipeline to one or more images.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image segmentation maps, optionally resized
        to `target_sizes` via bilinear interpolation."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="""bilinear""", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 173
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _a(DiffusionPipeline):
    """Audio diffusion pipeline: denoises mel-spectrogram images with a UNet
    (optionally in the latent space of a VQ-VAE) and converts them to audio.

    Fixes: signatures repeated the same parameter name (a SyntaxError),
    methods shared one placeholder name, and many assignment targets were
    collapsed to throwaway variables.
    NOTE(review): the original base class name was unresolved in this chunk;
    ``DiffusionPipeline`` (imported above) is the presumed base — confirm.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 otherwise (DDPM)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,  # noqa: A002 - kept for API compatibility
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generate spectrogram images (and the corresponding audio).

        If an input audio slice is supplied it is encoded and noised so the
        denoising process can start from it; `mask_start_secs`/`mask_end_secs`
        keep the corresponding edges of the input fixed during denoising.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            # Map uint8 pixels to [-1, 1].
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]
            if mask is not None:
                # Re-impose the (noised) input at the masked edges.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]

        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the DDIM process: map spectrogram images back to noise."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def slerp(xa: torch.Tensor, xa2: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xa2)) / torch.norm(xa) / torch.norm(xa2))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xa2 / sin(theta)
| 34
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """A node of a doubly linked list holding a cache (key, value) pair.

    Fix: the TypeVars and class were bound to placeholder names while the
    rest of the file references ``T``, ``U`` and ``DoubleLinkedListNode``.
    """

    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes.

    The most recently used node always sits directly before ``rear``.
    Fix: the class was bound to a placeholder name while the cache below
    references ``DoubleLinkedList``.
    """

    def __init__(self):
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node):
        """Insert `node` just before the rear sentinel (most-recently-used slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Unlink `node` and return it; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a doubly linked list plus a dict.

    Fix: the class, its methods (``get``, ``put``, ``decorator``) and the
    class attribute were bound to placeholder names that shadowed each other
    while the bodies reference the real names.
    """

    # Shared map used by `decorator`: one cache instance per decorated function.
    decorator_function_to_instance_map = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Return the cached value for `key` (marking it most recently used), or None."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Insert or update `key`, evicting the least recently used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size=128):
        """Decorator factory adding LRU memoization keyed on the first argument."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info():
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

# Generic dataset type: either a map-style `Dataset` or an `IterableDataset`.
# Fix: both values were bound to the same shadowed name while the functions
# below reference `DatasetType` in their annotations.
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.

    Fix: the original signature repeated the parameter name ``_a`` (a
    SyntaxError) and was bound to a placeholder function name.

    Raises:
        ValueError: if the list is empty, mixes dataset kinds, contains a
            dataset dict, or `stopping_strategy` is invalid.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) along `axis`.

    Fix: the original signature repeated the parameter name ``_a`` (a
    SyntaxError) and was bound to a placeholder function name.

    Raises:
        ValueError: if the list is empty, mixes dataset kinds, or contains a
            dataset dict.
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 34
| 0
|
from numpy import exp, pi, sqrt
def _UpperCAmelCase ( snake_case , snake_case = 0.0 , snake_case = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case_(unknown_args):
    """Turn a flat ``['--key', 'value', ...]`` list into ``{'key': 'value'}``.

    The original parameter was named ``_a`` while the body read the undefined
    name ``unknown_args`` (NameError at call time); the parameter is renamed to
    match the body. Keys have all leading dashes stripped; an odd trailing
    element is silently dropped by ``zip``.
    """
    return {key.lstrip('''-'''): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def snake_case_ ():
    """Entry point of the ``datasets-cli`` tool: build the argparse CLI,
    register the sub-commands, parse, and dispatch to the selected command.

    NOTE(review): every assignment below goes to ``UpperCAmelCase`` while the
    body reads ``parser``, ``args`` and ``service``; ``allow_abbrev=_a`` and
    ``parse_unknown_args`` are also undefined here (the helper above is bound
    to ``snake_case_`` and is shadowed by this very definition). As written
    this raises NameError — the original local names were lost; confirm.
    """
    UpperCAmelCase = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a )
    UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(_a )
    EnvironmentCommand.register_subcommand(_a )
    TestCommand.register_subcommand(_a )
    RunBeamCommand.register_subcommand(_a )
    DummyDataCommand.register_subcommand(_a )
    # Parse args
    UpperCAmelCase , UpperCAmelCase = parser.parse_known_args()
    if not hasattr(_a , '''func''' ):
        parser.print_help()
        exit(1 )
    UpperCAmelCase = parse_unknown_args(_a )
    # Run
    UpperCAmelCase = args.func(_a , **_a )
    service.run()


# NOTE(review): `main` is not defined under that name in this file — the
# function above is bound to `snake_case_`.
if __name__ == "__main__":
    main()
| 34
| 0
|
"""simple docstring"""
lowerCamelCase__ = """Tobias Carryer"""
from time import time
class A__ :
    """Linear congruential generator (LCG): a simple deterministic
    pseudo-random sequence ``seed = (multiplier * seed + increment) % modulo``.

    The original ``__init__`` declared four parameters all named
    ``_SCREAMING_SNAKE_CASE`` (a duplicate-argument SyntaxError) and its
    attribute assignments were lost to local names; both are restored from the
    ``self.multiplier`` / ``self.seed`` / ``self.increment`` / ``self.modulo``
    reads in the step method below.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        # Default seed is fixed at class-definition time (B008), as in the
        # original.
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def __lowerCamelCase(self):
        """Advance the generator one step and return the new seed."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
    # Show the LCG in action.
    # NOTE(review): `LinearCongruentialGenerator` and `lcg.next_number` are not
    # defined in this file (the class above is bound to `A__`) — as written
    # this raises NameError, and the loop below never terminates by design.
    lowerCamelCase__ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
| 86
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories of the Flax example scripts (siblings of this test file) that
# are placed on sys.path so the scripts can be imported as modules below.
A =[
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
# NOTE(review): the list above is bound to `A`, but the next two statements
# read `SRC_DIRS` — as written this raises NameError (original name lost).
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A =logging.getLogger()
def snake_case_():
    """Parse ``sys.argv`` for the ``-f`` flag and return its value.

    The original assigned both the parser and the parsed namespace to
    ``UpperCAmelCase`` while the body read the undefined names ``parser`` and
    ``args`` (NameError); the locals are restored to the names the body uses.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''')
    args = parser.parse_args()
    return args.f
def snake_case_(output_dir, split="eval"):
    """Load ``{split}_results.json`` from ``output_dir`` and return the parsed
    dict.

    The original declared both parameters as ``_a`` (a duplicate-argument
    SyntaxError) and assigned the path to ``UpperCAmelCase`` while reading
    ``path`` and ``split``; names are restored from the body.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, '''r''') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Echo log records to stdout so test runners capture them.
# NOTE(review): the logger above and the handler here are both bound to `A`;
# `logger` and `stream_handler` are undefined — NameError as written.
A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
    """End-to-end smoke tests for the Flax example scripts: each test builds a
    CLI argument string, patches ``sys.argv``, runs the script's ``main()``,
    then checks the metrics written to ``{split}_results.json``.

    NOTE(review): every test method below is named ``A``, so each definition
    shadows the previous one and only the last survives. Results are assigned
    to ``UpperCAmelCase`` while assertions read ``result``; ``lowercase``
    (used as both the patch target — presumably ``sys`` — and the argv value)
    and the ``{tmp_dir}`` f-string variable are likewise undefined as written.
    The original local names were lost in obfuscation; confirm before use.
    """

    def A ( self : Any ):
        '''Run run_flax_glue on the MRPC fixtures and check eval accuracy.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_glue.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )

    @slow
    def A ( self : Any ):
        '''Run run_clm_flax on the sample text and check eval perplexity.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_clm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 100 )

    @slow
    def A ( self : str ):
        '''Run run_summarization_flax on the XSum fixtures and check ROUGE.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_summarization_flax.main()
            UpperCAmelCase = get_results(lowercase , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def A ( self : int ):
        '''Run run_mlm_flax on the sample text and check eval perplexity.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 42 )

    @slow
    def A ( self : Tuple ):
        '''Run run_ta_mlm_flax (T5 span-corruption) and check eval accuracy.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_ta_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )

    @slow
    def A ( self : Tuple ):
        '''Run run_flax_ner on the CoNLL fixtures and check accuracy and F1.'''
        # More epochs when several GPUs share the fixed per-device batch size.
        UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_ner.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def A ( self : Union[str, Any] ):
        '''Run run_qa on the SQuAD fixtures and check F1 and exact match.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_qa.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazily-importable structure of the Graphormer sub-package: the configuration
# is always importable; the modeling classes additionally require torch.
# NOTE(review): both the import-structure dict and the modeling-names list are
# bound to `UpperCAmelCase__` (the second binding clobbers the first), and the
# `_LazyModule(...)` call reads `_import_structure`, which is never defined —
# the original `_import_structure` name was lost in obfuscation; confirm.
UpperCAmelCase__ = {
    '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols, exposed only when torch is installed.
    UpperCAmelCase__ = [
        '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GraphormerForGraphClassification''',
        '''GraphormerModel''',
        '''GraphormerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5
|
'''simple docstring'''
class _a :
def __init__( self : Any ):
'''simple docstring'''
UpperCAmelCase = {} # Mapping from char to TrieNode
UpperCAmelCase = False
def A ( self : int , lowercase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowercase )
def A ( self : Optional[int] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
UpperCAmelCase = TrieNode()
UpperCAmelCase = curr.nodes[char]
UpperCAmelCase = True
def A ( self : Optional[int] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def A ( self : str , lowercase : str ):
'''simple docstring'''
def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool:
if index == len(lowercase ):
# If word does not exist
if not curr.is_leaf:
return False
UpperCAmelCase = False
return len(curr.nodes ) == 0
UpperCAmelCase = word[index]
UpperCAmelCase = curr.nodes.get(lowercase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
UpperCAmelCase = _delete(lowercase , lowercase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , lowercase , 0 )
def snake_case_(node, word):
    """Depth-first print of every complete word stored under ``node``,
    separated by single spaces (``word`` is the prefix accumulated so far).

    The original declared both parameters as ``_a`` (a duplicate-argument
    SyntaxError) and recursed via the undefined name ``print_words``; the
    parameter names are restored from the body and the recursion now calls
    this function itself.
    """
    if node.is_leaf:
        print(word, end=''' ''')
    for key, value in node.nodes.items():
        snake_case_(value, word + key)
def snake_case_():
    """Self-test of the trie: insert a word list, check membership, delete two
    words and re-check. Returns True when every assertion passes.

    The original instantiated the undefined name ``TrieNode`` (the trie class
    in this file is bound to ``_a``) and its ``all(...)`` generator passed the
    whole word list instead of each word; both are fixed here.
    """
    words = '''banana bananas bandana band apple all beast'''.split()
    root = _a()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find('''banana''')
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    assert root.find('''apple''')
    assert root.find('''all''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')
    return True
def snake_case_(entry, passes):
    """Print the test name followed by a pass/fail message.

    The original declared both parameters as ``_a`` (a duplicate-argument
    SyntaxError) while the body read ``passes``; parameter names restored from
    the body and caller.
    """
    print(str(entry), '''works!''' if passes else '''doesn\'t work :(''')
def snake_case_ ():
    # NOTE(review): `test_trie` is not defined under that name in this file
    # (the trie self-test above is also bound to `snake_case_` and is shadowed
    # by this definition) — NameError as written; original name lost.
    assert test_trie()
def snake_case_ ():
    # NOTE(review): `print_results` and `test_trie` are not defined under
    # those names here (each helper above is bound to `snake_case_`) —
    # NameError as written; original names lost in obfuscation.
    print_results('''Testing trie functionality''' , test_trie() )


# NOTE(review): `main` is likewise undefined — the function above is bound to
# `snake_case_`.
if __name__ == "__main__":
    main()
| 34
| 0
|
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowercase_(state):
    """Return a 1-D float tensor of ``state.num_processes`` values, offset by
    this process's index so every process holds a distinct, contiguous range
    (process p holds ``p*n + 1 .. p*n + n``), placed on ``state.device``.

    The original parameter was named ``_A`` while the body read the undefined
    name ``state`` (NameError); the parameter is renamed to match the body.
    """
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def lowercase_ ( _A : List[Any] ):
    """Gather one tensor per process and check the concatenated values cover
    1..num_processes**2.

    NOTE(review): the body reads ``_a``, ``state`` and ``gathered_tensor``,
    none of which are defined here (assignments go to ``lowerCamelCase__`` and
    the parameter is ``_A``) — the original locals were lost; confirm.
    """
    lowerCamelCase__ : int = create_tensor(_a )
    lowerCamelCase__ : Union[str, Any] = gather(_a )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowercase_ ( _A : str ):
    """Gather one Python object (the process index) per process and check the
    result equals range(num_processes).

    NOTE(review): the body reads ``state``, ``_a`` and ``gathered_obj`` —
    undefined here (assignments go to ``lowerCamelCase__``); original locals
    lost in obfuscation.
    """
    lowerCamelCase__ : List[Any] = [state.process_index]
    lowerCamelCase__ : str = gather_object(_a )
    assert len(_a ) == state.num_processes, F"{gathered_obj}, {len(_a )} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def lowercase_ ( _A : Optional[int] ):
    """Broadcast the rank-0 tensor to all processes and check shape/values.

    NOTE(review): the body reads ``_a``, ``broadcasted_tensor`` and ``state``
    — undefined here (assignments go to ``lowerCamelCase__``); original locals
    lost in obfuscation.
    """
    lowerCamelCase__ : List[Any] = create_tensor(_A )
    lowerCamelCase__ : Tuple = broadcast(_A )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowercase_ ( _A : Optional[Any] ):
    """Create tensors of different lengths per rank, pad them to a common
    length across processes, and check the padded shape/values.

    NOTE(review): the body reads ``state``, ``_a`` and ``padded_tensor`` —
    undefined here (assignments go to ``lowerCamelCase__``); original locals
    lost in obfuscation.
    """
    if state.is_main_process:
        lowerCamelCase__ : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        lowerCamelCase__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
    lowerCamelCase__ : int = pad_across_processes(_a )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowercase_ ( _A : Tuple ):
    """Sum-reduce the per-process tensors across exactly two processes and
    compare against the hard-coded expected total [4.0, 6.0].

    NOTE(review): the body reads ``state``, ``_a``, ``reduced_tensor`` and
    ``truth_tensor`` — undefined here (assignments go to ``lowerCamelCase__``);
    original locals lost in obfuscation.
    """
    if state.num_processes != 2:
        return
    lowerCamelCase__ : Any = create_tensor(_a )
    lowerCamelCase__ : int = reduce(_a , "sum" )
    lowerCamelCase__ : Optional[int] = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}"
def lowercase_ ( _A : Optional[Any] ):
    """Mean-reduce the per-process tensors across exactly two processes and
    compare against the hard-coded expected mean [2.0, 3.0].

    NOTE(review): the body reads ``state``, ``_a``, ``reduced_tensor`` and
    ``truth_tensor`` — undefined here (assignments go to ``lowerCamelCase__``);
    original locals lost in obfuscation.
    """
    if state.num_processes != 2:
        return
    lowerCamelCase__ : Any = create_tensor(_a )
    lowerCamelCase__ : List[str] = reduce(_a , "mean" )
    lowerCamelCase__ : Optional[int] = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}"
def lowercase_ ( _A : Any ):
    """Per-process entry point used by accelerate's test launcher.

    NOTE(review): ``main`` is not defined under that name in this file (the
    driver below is also bound to ``lowercase_``) — NameError as written.
    """
    main()
def lowercase_ ( ):
    """Run all distributed-ops checks (gather, broadcast, pad, reduce) on the
    current accelerate PartialState.

    NOTE(review): the state is assigned to ``lowerCamelCase__`` but read as
    ``state``, and every ``test_*`` helper called below is actually bound to
    ``lowercase_`` in this file — NameError as written; original names lost.
    """
    lowerCamelCase__ : Optional[int] = PartialState()
    state.print(F"State: {state}" )
    state.print("testing gather" )
    test_gather(_a )
    state.print("testing gather_object" )
    test_gather_object(_a )
    state.print("testing broadcast" )
    test_broadcast(_a )
    state.print("testing pad_across_processes" )
    test_pad_across_processes(_a )
    state.print("testing reduce_sum" )
    test_reduce_sum(_a )
    state.print("testing reduce_mean" )
    test_reduce_mean(_a )
# NOTE(review): `main` is not defined under that name — the driver above is
# bound to `lowercase_`; NameError as written.
if __name__ == "__main__":
    main()
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( __a ):
    """Configuration for the EnCodec neural audio codec (architecture sizes,
    streaming/chunking options, and quantizer settings), plus derived
    properties for chunk length/stride, frame rate and quantizer count.

    Reconstruction note: the original ``__init__`` declared all 23 parameters
    as ``lowercase`` (a duplicate-argument SyntaxError) and all four
    properties as ``A`` (each shadowing the last). Parameter names are
    recovered from the ``self.x = ...`` assignments and property names from
    the ``self.chunk_length`` / ``self.frame_rate`` reads in the bodies.
    """

    # model identifier; the broken `Union[str, Any]` annotation (Union is not
    # imported in this file) is dropped.
    __a = """encodec"""

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # noqa: B006 — defaults kept as in the original
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],  # noqa: B006
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook vectors default to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when not chunking."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride between chunks in samples (at least 1), or None."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        """Encoder frame rate: sampling rate divided by the total hop length."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        """Number of residual quantizers implied by the largest bandwidth."""
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 34
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)

# Map fairseq parameter-name fragments to HF Wav2Vec2Conformer module paths;
# '*' stands for the encoder-layer index filled in during conversion.
# NOTE(review): `_a` is rebound three times in a row (logger, mapping dict,
# top-level key list) — the original MAPPING / TOP_LEVEL_KEYS names were lost
# in obfuscation, so the earlier bindings are clobbered; confirm before use.
_a = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys that live at the top level of the HF model (not under the
# `wav2vec2_conformer.` prefix).
_a = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[str]:
    """Walk the HF model to the module addressed by a dotted key, check that
    the fairseq tensor's shape matches, and copy it into the matching slot
    (weight / bias / weight_g / weight_v / running stats / inv_freq).

    NOTE(review): all five parameters are declared as ``__lowerCAmelCase`` (a
    duplicate-argument SyntaxError) while the body reads ``key``, ``value``,
    ``full_name``, ``weight_type`` and ``hf_pointer``, and every assignment
    goes to ``_UpperCAmelCase`` — the original names were lost in obfuscation;
    confirm before use.
    """
    for attribute in key.split('.' ):
        _UpperCAmelCase = getattr(_a , _a )
    if weight_type is not None:
        _UpperCAmelCase = getattr(_a , _a ).shape
    else:
        _UpperCAmelCase = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        _UpperCAmelCase = value
    elif weight_type == "weight_g":
        _UpperCAmelCase = value
    elif weight_type == "weight_v":
        _UpperCAmelCase = value
    elif weight_type == "bias":
        _UpperCAmelCase = value
    elif weight_type == "running_mean":
        _UpperCAmelCase = value
    elif weight_type == "running_var":
        _UpperCAmelCase = value
    elif weight_type == "num_batches_tracked":
        _UpperCAmelCase = value
    elif weight_type == "inv_freq":
        _UpperCAmelCase = value
    else:
        _UpperCAmelCase = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> str:
    """Copy every tensor from the fairseq state dict into the HF model:
    conv-feature-extractor tensors go through the conv loader; everything else
    is matched against the name-fragment mapping and set recursively; unmatched
    names are collected and logged as unused.

    NOTE(review): all three parameters are declared as ``__lowerCAmelCase`` (a
    duplicate-argument SyntaxError) while the body reads ``fairseq_model``,
    ``hf_model``, ``fairseq_dict`` etc., every assignment goes to
    ``_UpperCAmelCase``, and ``load_conv_layer`` / ``MAPPING`` /
    ``set_recursively`` are not defined under those names in this file — the
    original names were lost in obfuscation; confirm before use.
    """
    _UpperCAmelCase = []
    _UpperCAmelCase = fairseq_model.state_dict()
    _UpperCAmelCase = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        _UpperCAmelCase = False
        if "conv_layers" in name:
            load_conv_layer(
                _a , _a , _a , _a , hf_model.config.feat_extract_norm == 'group' , )
            _UpperCAmelCase = True
        else:
            for key, mapped_key in MAPPING.items():
                _UpperCAmelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    _UpperCAmelCase = True
                    if "*" in mapped_key:
                        _UpperCAmelCase = name.split(_a )[0].split('.' )[-2]
                        _UpperCAmelCase = mapped_key.replace('*' , _a )
                    if "pos_bias_u" in name:
                        _UpperCAmelCase = None
                    elif "pos_bias_v" in name:
                        _UpperCAmelCase = None
                    elif "weight_g" in name:
                        _UpperCAmelCase = 'weight_g'
                    elif "weight_v" in name:
                        _UpperCAmelCase = 'weight_v'
                    elif "bias" in name:
                        _UpperCAmelCase = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        _UpperCAmelCase = 'weight'
                    elif "running_mean" in name:
                        _UpperCAmelCase = 'running_mean'
                    elif "inv_freq" in name:
                        _UpperCAmelCase = 'inv_freq'
                    elif "running_var" in name:
                        _UpperCAmelCase = 'running_var'
                    elif "num_batches_tracked" in name:
                        _UpperCAmelCase = 'num_batches_tracked'
                    else:
                        _UpperCAmelCase = None
                    set_recursively(_a , _a , _a , _a , _a )
                continue
        if not is_used:
            unused_weights.append(_a )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any:
    """Copy one fairseq feature-extractor conv-layer tensor (conv weight/bias
    or layer-norm weight/bias, selected by the type id parsed from the name)
    into the HF feature extractor, validating shapes first; anything else is
    recorded as unused.

    NOTE(review): all five parameters are declared as ``__lowerCAmelCase`` (a
    duplicate-argument SyntaxError) while the body reads ``full_name``,
    ``name``, ``value``, ``feature_extractor``, ``use_group_norm`` etc., and
    every assignment goes to ``_UpperCAmelCase`` — the original names were
    lost in obfuscation; confirm before use.
    """
    _UpperCAmelCase = full_name.split('conv_layers.' )[-1]
    _UpperCAmelCase = name.split('.' )
    _UpperCAmelCase = int(items[0] )
    _UpperCAmelCase = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            _UpperCAmelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            _UpperCAmelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            _UpperCAmelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            _UpperCAmelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(_a )
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True )-> Optional[int]:
    """Convert a fairseq wav2vec2-conformer checkpoint to an HF model: build
    the config, optionally build a CTC tokenizer/feature-extractor/processor
    from the fairseq dictionary (fine-tuned case), load the fairseq model,
    copy its weights across, and save the HF model.

    NOTE(review): the five parameters are all declared as ``__lowerCAmelCase``
    (a duplicate-argument SyntaxError) while the body reads
    ``checkpoint_path``, ``config_path``, ``dict_path``, ``is_finetuned`` and
    others; every assignment goes to ``_UpperCAmelCase`` and
    ``recursively_load_weights`` is not defined under that name in this file —
    the original names were lost in obfuscation; confirm before use.
    """
    if config_path is not None:
        _UpperCAmelCase = WavaVecaConformerConfig.from_pretrained(_a , hidden_act='swish' )
    else:
        _UpperCAmelCase = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # Rotary-embedding checkpoints are identified by their path.
        _UpperCAmelCase = 'rotary'
    if is_finetuned:
        if dict_path:
            _UpperCAmelCase = Dictionary.load(_a )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            _UpperCAmelCase = target_dict.pad_index
            _UpperCAmelCase = target_dict.bos_index
            _UpperCAmelCase = target_dict.eos_index
            _UpperCAmelCase = len(target_dict.symbols )
            _UpperCAmelCase = os.path.join(_a , 'vocab.json' )
            if not os.path.isdir(_a ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_a ) )
                return
            os.makedirs(_a , exist_ok=_a )
            _UpperCAmelCase = target_dict.indices
            # fairseq has the <pad> and <s> switched
            _UpperCAmelCase = 0
            _UpperCAmelCase = 1
            with open(_a , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(_a , _a )
            _UpperCAmelCase = WavaVecaCTCTokenizer(
                _a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_a , )
            _UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
            _UpperCAmelCase = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_a , return_attention_mask=_a , )
            _UpperCAmelCase = WavaVecaProcessor(feature_extractor=_a , tokenizer=_a )
            processor.save_pretrained(_a )
        _UpperCAmelCase = WavaVecaConformerForCTC(_a )
    else:
        _UpperCAmelCase = WavaVecaConformerForPreTraining(_a )
    if is_finetuned:
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        _UpperCAmelCase = argparse.Namespace(task='audio_pretraining' )
        _UpperCAmelCase = fairseq.tasks.setup_task(_a )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_a )
    _UpperCAmelCase = model[0].eval()
    recursively_load_weights(_a , _a , not is_finetuned )
    hf_wavavec.save_pretrained(_a )
# CLI entry: parse checkpoint/config/dict paths and run the conversion.
# NOTE(review): the parser and parsed args are both bound to `_a` while the
# add_argument calls read `parser` and the final call reads `args`; the
# conversion function above is bound to `__A`, not
# `convert_wavaveca_conformer_checkpoint` — NameError as written; the original
# names were lost in obfuscation.
if __name__ == "__main__":
    _a = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    _a = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 39
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
class _a ( __a ):
__a : str = ["""pixel_values"""]
def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
    # Main preprocessing entry point: resolve per-call overrides against the
    # instance defaults, validate them, then apply resize -> center-crop ->
    # rescale -> normalize -> channel-format conversion to every image and
    # return a BatchFeature with "pixel_values".
    # NOTE(review): obfuscation collapsed all parameters to `lowercase` and all
    # assignment targets to `UpperCAmelCase`; the `... if ... is not None else
    # self....` right-hand sides show each intended parameter name.
    def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
        '''Preprocess one image or a batch of images into model-ready pixel values.'''
        # Per-call arguments override the defaults configured on the instance.
        UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase = size if size is not None else self.size
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        UpperCAmelCase = resample if resample is not None else self.resample
        UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
        UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase = image_std if image_std is not None else self.image_std
        UpperCAmelCase = make_list_of_images(lowercase )
        if not valid_images(lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # Each enabled step needs its companion argument.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
        if do_resize:
            UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
        if do_center_crop:
            UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
        if do_rescale:
            UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
        if do_normalize:
            UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
        UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
        UpperCAmelCase = {'''pixel_values''': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
    # Convert model `outputs.logits` into per-image semantic segmentation maps.
    # If `target_sizes` is given, each logit map is bilinearly resized to its
    # target before the per-pixel argmax; otherwise argmax is taken directly
    # over the class dimension (dim=1) for the whole batch.
    # NOTE(review): names mangled by obfuscation (duplicate `lowercase` args,
    # `UpperCAmelCase` targets); intent inferred from attribute reads
    # (`outputs`, `target_sizes`, `semantic_segmentation`).
    def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
        '''Post-process raw logits into one segmentation map per image.'''
        UpperCAmelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase ) != len(lowercase ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(lowercase ):
                UpperCAmelCase = target_sizes.numpy()
            UpperCAmelCase = []
            for idx in range(len(lowercase ) ):
                UpperCAmelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
                UpperCAmelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase )
        else:
            UpperCAmelCase = logits.argmax(dim=1 )
            UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 34
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and checkpoint-name -> hosted config.json URL map.
# NOTE(review): both statements bind the same mangled name `_snake_case`, so
# the map clobbers the logger -- an obfuscation artifact; the intended names
# differ upstream (logger / pretrained config archive map). Confirm before use.
_snake_case = logging.get_logger(__name__)
_snake_case = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
# OwlViT text-encoder configuration (model_type "owlvit_text_model").
# NOTE(review): obfuscation collapsed every `__init__` parameter to `_a`
# (duplicate argument names are a SyntaxError) and every assignment target to
# `_A`; the right-hand sides (`vocab_size`, `hidden_size`, ...) show the
# intended attribute names -- reconcile with upstream before executing.
class lowercase ( __a ):
    # model_type identifier used when dispatching from a combined config.
    _a = """owlvit_text_model"""
    def __init__( self , _a=4_9408 , _a=512 , _a=2048 , _a=12 , _a=8 , _a=16 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , _a=0 , _a=4_9406 , _a=4_9407 , **_a , ) -> Union[str, Any]:
        super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
        _A : List[Any] = vocab_size
        _A : List[str] = hidden_size
        _A : List[str] = intermediate_size
        _A : Union[str, Any] = num_hidden_layers
        _A : Optional[int] = num_attention_heads
        _A : Tuple = max_position_embeddings
        _A : Any = hidden_act
        _A : Tuple = layer_norm_eps
        _A : List[str] = attention_dropout
        _A : Tuple = initializer_range
        _A : int = initializer_factor
    @classmethod
    def a__ ( cls , _a , **_a ) -> List[str]:
        # Build a text config from a pretrained checkpoint's config dict.
        cls._set_token_in_kwargs(_a )
        _A , _A : List[Any] = cls.get_config_dict(_a , **_a )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            _A : Dict = config_dict["""text_config"""]
        # Warn when instantiating a config of a different declared model_type.
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_a , **_a )
# OwlViT vision-encoder configuration (model_type "owlvit_vision_model").
# NOTE(review): same obfuscation damage as the text config above -- duplicate
# `_a` parameters (SyntaxError) and mangled `_A` assignment targets; intended
# attribute names are visible on the right-hand sides.
class lowercase ( __a ):
    # model_type identifier used when dispatching from a combined config.
    _a = """owlvit_vision_model"""
    def __init__( self , _a=768 , _a=3072 , _a=12 , _a=12 , _a=3 , _a=768 , _a=32 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> Optional[int]:
        super().__init__(**_a )
        _A : Union[str, Any] = hidden_size
        _A : Union[str, Any] = intermediate_size
        _A : int = num_hidden_layers
        _A : List[str] = num_attention_heads
        _A : Dict = num_channels
        _A : Optional[int] = image_size
        _A : int = patch_size
        _A : Tuple = hidden_act
        _A : Tuple = layer_norm_eps
        _A : List[Any] = attention_dropout
        _A : Union[str, Any] = initializer_range
        _A : List[Any] = initializer_factor
    @classmethod
    def a__ ( cls , _a , **_a ) -> Optional[int]:
        # Build a vision config from a pretrained checkpoint's config dict.
        cls._set_token_in_kwargs(_a )
        _A , _A : Optional[int] = cls.get_config_dict(_a , **_a )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            _A : int = config_dict["""vision_config"""]
        # Warn when instantiating a config of a different declared model_type.
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_a , **_a )
# Combined OwlViT configuration holding a text and a vision sub-config plus
# the projection/logit-scale hyperparameters.
# NOTE(review): obfuscation gave both class attributes the name `_a` (the
# second clobbers the first), gave the three classmethods/methods the single
# name `a__` (only the last definition survives at runtime), and mangled all
# `__init__` parameters to `_a` (duplicate arguments are a SyntaxError).
# Intended names are inferable from the bodies -- confirm upstream.
class lowercase ( __a ):
    _a = """owlvit"""
    _a = True
    def __init__( self , _a=None , _a=None , _a=512 , _a=2.6592 , _a=True , **_a , ) -> Optional[int]:
        super().__init__(**_a )
        # Missing sub-configs fall back to the sub-config class defaults.
        if text_config is None:
            _A : List[Any] = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            _A : List[str] = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        _A : Tuple = OwlViTTextConfig(**_a )
        _A : int = OwlViTVisionConfig(**_a )
        _A : Optional[Any] = projection_dim
        _A : Dict = logit_scale_init_value
        _A : Tuple = return_dict
        _A : Union[str, Any] = 1.0
    @classmethod
    def a__ ( cls , _a , **_a ) -> List[Any]:
        # Load a combined config from a pretrained checkpoint.
        cls._set_token_in_kwargs(_a )
        _A , _A : Tuple = cls.get_config_dict(_a , **_a )
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_a , **_a )
    @classmethod
    def a__ ( cls , _a , _a , **_a ) -> Optional[int]:
        # Assemble a combined config from separate text/vision config dicts.
        # NOTE(review): duplicate `_a` parameters here are a SyntaxError.
        _A : List[Any] = {}
        _A : Dict = text_config
        _A : List[Any] = vision_config
        return cls.from_dict(_a , **_a )
    def a__ ( self ) -> str:
        # Serialize this config (and its sub-configs) to a plain dict.
        _A : Union[str, Any] = copy.deepcopy(self.__dict__ )
        _A : Dict = self.text_config.to_dict()
        _A : Union[str, Any] = self.vision_config.to_dict()
        _A : List[Any] = self.__class__.model_type
        return output
# ONNX export configuration for OwlViT: declares input/output dynamic axes,
# the validation tolerance, dummy-input generation, and the default opset.
# NOTE(review): all five members were renamed to `a__` by obfuscation, so at
# runtime only the last definition survives -- upstream these are `inputs`,
# `outputs`, `atol_for_validation`, `generate_dummy_inputs`, and
# `default_onnx_opset` (confirm before use).
class lowercase ( __a ):
    @property
    def a__ ( self ) -> Tuple:
        # Dynamic axes of the exported model's inputs.
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )
    @property
    def a__ ( self ) -> Optional[Any]:
        # Dynamic axes of the exported model's outputs.
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )
    @property
    def a__ ( self ) -> Union[str, Any]:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
    def a__ ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Optional[Any]:
        # Merge tokenizer-side and image-processor-side dummy inputs.
        _A : Dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a )
        _A : Union[str, Any] = super().generate_dummy_inputs(
            processor.image_processor , batch_size=_a , framework=_a )
        return {**text_input_dict, **image_input_dict}
    @property
    def a__ ( self ) -> Any:
        # Minimum ONNX opset version required for export.
        return 14
| 26
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (preds , labels ):
    """Simple accuracy: the fraction of entries where ``preds`` equals ``labels``.

    Both arguments are expected to be numpy arrays of the same shape; the
    element-wise comparison produces a boolean array whose mean is the accuracy.
    NOTE(review): the obfuscated original declared both parameters as `_a`
    (duplicate arguments are a SyntaxError) while the body read the undefined
    names `preds`/`labels`; the parameter names are restored here.
    """
    return (preds == labels).mean()
@dataclass
# Command-line arguments describing which pretrained model/config/tokenizer to
# load. NOTE(review): obfuscation renamed every field to `__a` (each
# assignment clobbers the previous) and replaced the `default=None` values
# with a self-referential `__a`; upstream these are `model_name_or_path`,
# `config_name`, `tokenizer_name`, and `cache_dir` -- confirm before use.
class _a :
    # Required: model checkpoint path or hub identifier.
    __a : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # Optional overrides for config / tokenizer / download cache location.
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
# Command-line arguments describing the task data and preprocessing.
# NOTE(review): fields were all renamed to `__a` by obfuscation (later
# assignments clobber earlier ones) and the boolean default was replaced by a
# self-referential `__a`; upstream these are `task_name`, `data_dir`,
# `max_seq_length`, and `overwrite_cache` -- confirm before use.
class _a :
    # Required: task key (must be present in `processors`) and its data dir.
    __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    # Sequences longer than this are truncated; shorter ones are padded.
    __a : int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    __a : bool = field(
        default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
    """Fine-tune and evaluate a multiple-choice model (HF example entry point).

    Parses CLI args into (model, data, training) dataclasses, sets up logging
    and the seed, loads config/tokenizer/model, builds train/eval datasets,
    runs `Trainer`, and writes eval metrics to ``eval_results.txt``.

    NOTE(review): obfuscation replaced every assignment target with
    `UpperCAmelCase` and many call arguments with `_a`, so as written the body
    reads names (`parser`, `training_args`, `processor`, `trainer`, ...) that
    are never bound; reconcile with the upstream
    ``examples/legacy/run_multiple_choice.py`` before executing.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , _a )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task's processor and label set; unknown tasks fail loudly.
    try:
        UpperCAmelCase = processors[data_args.task_name]()
        UpperCAmelCase = processor.get_labels()
        UpperCAmelCase = len(_a )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    # Metric: argmax over choice logits, compared against gold labels.
    def compute_metrics(_a : EvalPrediction ) -> Dict:
        UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}
    # Data collator
    UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(_a , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , _a , _a )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(_a )
    return results
# TPU entry point: xla_spawn passes a process index, which is ignored.
# NOTE(review): `main` is undefined in this file -- obfuscation renamed the
# real entry point to `snake_case_` (and this wrapper shadows it), so both
# calls below would raise NameError; reconcile names with upstream.
def snake_case_ (_a : Optional[int] ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 34
| 0
|
'''simple docstring'''
from PIL import Image
def _lowerCamelCase ( lowercase : Image , lowercase : int ) -> Tuple:
_a = (259 * (level + 255)) / (255 * (259 - level))
def contrast(lowercase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_a )
if __name__ == "__main__":
    # Demo: load an image, raise its contrast, and save the result.
    # Bug fix: the original called the undefined name `change_contrast`, bound
    # the result to a mangled throwaway name, and then saved the undefined
    # `cont_img`; it now calls `_lowerCamelCase` and keeps the result.
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = _lowerCamelCase(img, 1_70)
    cont_img.save('image_data/lena_high_contrast.png', format='png')
| 63
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( ProcessorMixin ):
    """Bundle a Chinese-CLIP image processor and a BERT tokenizer into one processor.

    NOTE(review): the obfuscated original was not valid Python -- every method
    signature reused the parameter name `lowercase` (a SyntaxError), all three
    class attributes were bound to the same mangled name `__a`, and all four
    methods/properties were named `A` (each definition clobbering the last).
    Names below are restored from the upstream `ChineseCLIPProcessor` contract,
    and the base class is the `ProcessorMixin` imported at the top of the file
    (the original inherited the undefined name `__a`).
    """

    # Attribute names ProcessorMixin serializes, plus the classes used when
    # auto-loading the two sub-processors from a checkpoint.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Store the two sub-processors; `feature_extractor` is a deprecated alias.

        Raises:
            ValueError: if either sub-processor is missing.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding with an added ``pixel_values`` entry
        when both modalities are given, the bare text encoding for text only,
        or a `BatchEncoding` of image features for images only.

        Raises:
            ValueError: if both `text` and `images` are None.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Bug fix: the original computed the pixel values but never
            # attached them to the returned encoding.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Order-preserving, de-duplicated union of both sub-processors' input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class
| 34
| 0
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
# Emit a deprecation notice whenever this legacy script is imported/executed;
# the module only exists to re-export StableDiffusionImgaImgPipeline (see the
# noqa'd import above).
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 66
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger and checkpoint-name -> hosted config.json URL map.
# NOTE(review): both statements bind the same mangled name `A`, so the map
# clobbers the logger -- an obfuscation artifact; upstream these are the
# module logger and the Marian pretrained-config archive map.
A =logging.get_logger(__name__)
A ={
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
# Marian encoder-decoder model configuration.
# NOTE(review): obfuscation renamed the three class attributes to `__a` (each
# assignment clobbers the previous -- upstream these are `model_type`,
# `keys_to_ignore_at_inference`, `attribute_map`), collapsed every `__init__`
# parameter to `lowercase` (duplicate arguments are a SyntaxError), and
# mangled assignment targets to `UpperCAmelCase`; the right-hand sides show
# the intended attribute names -- confirm against upstream MarianConfig.
class _a ( __a ):
    __a : List[Any] = """marian"""
    __a : Union[str, Any] = ["""past_key_values"""]
    __a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ):
        '''Store Marian architecture hyperparameters on the config instance.'''
        UpperCAmelCase = vocab_size
        # Decoder may use its own vocabulary; defaults to the encoder's.
        UpperCAmelCase = decoder_vocab_size or vocab_size
        UpperCAmelCase = max_position_embeddings
        UpperCAmelCase = d_model
        UpperCAmelCase = encoder_ffn_dim
        UpperCAmelCase = encoder_layers
        UpperCAmelCase = encoder_attention_heads
        UpperCAmelCase = decoder_ffn_dim
        UpperCAmelCase = decoder_layers
        UpperCAmelCase = decoder_attention_heads
        UpperCAmelCase = dropout
        UpperCAmelCase = attention_dropout
        UpperCAmelCase = activation_dropout
        UpperCAmelCase = activation_function
        UpperCAmelCase = init_std
        UpperCAmelCase = encoder_layerdrop
        UpperCAmelCase = decoder_layerdrop
        UpperCAmelCase = use_cache
        UpperCAmelCase = encoder_layers
        UpperCAmelCase = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCAmelCase = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , )
# ONNX export configuration for Marian: dynamic axes for inputs/outputs,
# dummy-input generation for seq2seq and causal-lm tasks, past-key-value
# flattening, and the validation tolerance.
# NOTE(review): obfuscation renamed all methods to `A` (only the last
# definition survives at runtime) and collapsed parameters/targets to
# `lowercase`/`UpperCAmelCase`; repeated bare `UpperCAmelCase = ...` lines
# below were originally distinct dict-item assignments (e.g. per-layer
# past-key-value axes). Reconcile with upstream MarianOnnxConfig before use.
class _a ( __a ):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def A ( self : int ):
        '''Dynamic input axes, varying with task and use_past.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase = {0: '''batch'''}
                UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def A ( self : Any ):
        '''Dynamic output axes; adds per-layer present-state axes when use_past.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super().outputs
        else:
            UpperCAmelCase = super(lowercase , self ).outputs
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    # Dummy inputs for the default / seq2seq-lm export path: encoder inputs,
    # decoder inputs, and (optionally) zero-filled past key values.
    def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build encoder+decoder dummy inputs, optionally with past_key_values.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        # Generate decoder inputs
        UpperCAmelCase = seq_length if not self.use_past else 1
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        UpperCAmelCase = dict(**lowercase , **lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
                UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
                UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
                UpperCAmelCase = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                UpperCAmelCase = decoder_seq_length + 3
                UpperCAmelCase = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                UpperCAmelCase = torch.cat(
                    [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 )
                UpperCAmelCase = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                UpperCAmelCase = min(lowercase , lowercase )
                UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers
                UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
                # Shared layers get 4-tuples (decoder self-attn + cross-attn k/v).
                for _ in range(lowercase ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(lowercase ),
                            torch.zeros(lowercase ),
                            torch.zeros(lowercase ),
                            torch.zeros(lowercase ),
                        ) )
                # TODO: test this.
                UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
                for _ in range(lowercase , lowercase ):
                    common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
        return common_inputs
    # Dummy inputs for the causal-lm export path.
    def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build decoder-only dummy inputs, optionally with past_key_values.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                UpperCAmelCase = seqlen + 2
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
                UpperCAmelCase = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                UpperCAmelCase = common_inputs['''attention_mask'''].dtype
                UpperCAmelCase = torch.cat(
                    [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
                UpperCAmelCase = [
                    (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
                ]
        return common_inputs
    # Tokenizer-only dummy inputs (batch/sequence dims made concrete).
    def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Tokenize a dummy batch sized by the effective batch/sequence dims.'''
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase )
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) )
        return common_inputs
    # Dispatcher: route to the seq2seq or causal-lm dummy-input builder.
    def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Dispatch dummy-input generation by task.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        else:
            UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        return common_inputs
    # Flatten nested past_key_values into the ONNX-friendly flat naming.
    def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ):
        '''Flatten past_key_values, choosing the base-class strategy by task.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
        else:
            UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_(
                lowercase , lowercase , lowercase , lowercase )
    @property
    def A ( self : Any ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1E-4
| 34
| 0
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCamelCase : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
# Test-fixture helper holding the hyperparameters used by the Pix2Struct
# image-processor tests below.
# NOTE(review): obfuscation collapsed all `__init__` parameters to `A_`
# (duplicate arguments are a SyntaxError) while the body reads the intended
# names (`parent`, `batch_size`, ..., `patch_size`); the two methods below
# share the mangled name `__UpperCamelCase`, so the second clobbers the first
# at runtime -- reconcile with the upstream tester before executing.
class A__ ( unittest.TestCase ):
    def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=None , A_=True , A_=True , A_=None , ):
        '''Record the fixture's image-processing hyperparameters.'''
        UpperCamelCase : Tuple = size if size is not None else {"height": 20, "width": 20}
        UpperCamelCase : List[str] = parent
        UpperCamelCase : Dict = batch_size
        UpperCamelCase : Any = num_channels
        UpperCamelCase : str = image_size
        UpperCamelCase : Dict = min_resolution
        UpperCamelCase : int = max_resolution
        UpperCamelCase : int = size
        UpperCamelCase : List[Any] = do_normalize
        UpperCamelCase : Any = do_convert_rgb
        UpperCamelCase : Optional[int] = [512, 1024, 2048, 4096]
        UpperCamelCase : int = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def __UpperCamelCase( self ):
        '''Return the kwargs dict used to build the image processor under test.'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def __UpperCamelCase( self ):
        '''Download and return a fixed RGB test image (requires network access).'''
        UpperCamelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        UpperCamelCase : List[Any] = Image.open(requests.get(A_ , stream=A_ ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( __a , unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
    # setUp-like hook: build the fixture helper for the tests below.
    # NOTE(review): `PixaStructImageProcessingTester` is undefined here -- the
    # helper class was renamed to `A__` by obfuscation; and the result is
    # bound to a mangled local instead of `self.image_processor_tester`.
    def __UpperCamelCase( self ):
        '''Create the image-processing test fixture.'''
        UpperCamelCase : Tuple = PixaStructImageProcessingTester(self )
    @property
    def __UpperCamelCase( self ):
        '''Kwargs dict for constructing the image processor under test.'''
        # NOTE(review): relies on `self.image_processor_tester`, which the
        # mangled setUp above never assigns -- confirm against upstream.
        return self.image_processor_tester.prepare_image_processor_dict()
    # Checks the processor exposes its configuration attributes.
    # NOTE(review): `hasattr` is called on the undefined module-level name
    # `A_` instead of the freshly built (mangled) local -- obfuscation damage.
    def __UpperCamelCase( self ):
        '''The image processor should expose do_normalize / do_convert_rgb.'''
        UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_convert_rgb" ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase : Optional[int] = 2048
UpperCamelCase : Tuple = image_processor(A_ , return_tensors="pt" , max_patches=A_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase : Union[str, Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : Any = image_processor(
A_ , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase : List[Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(A_ ):
UpperCamelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=A_ ).flattened_patches
UpperCamelCase : Union[str, Any] = "Hello"
UpperCamelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=A_ , header_text=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : Dict = image_processor(
A_ , return_tensors="pt" , max_patches=A_ , header_text=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
UpperCamelCase : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : List[str] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : str = image_processor(
A_ , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase : Union[str, Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase : Dict = image_processor(
A_ , return_tensors="pt" , max_patches=A_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( __a , unittest.TestCase ):
    """Tests `PixaStructImageProcessor` on 4-channel (RGBA) inputs.

    NOTE(review): method and attribute names restored from the mangled
    placeholders (``__UpperCamelCase`` / ``_UpperCAmelCase``); see the sibling
    RGB test class for details.  The processor converts to RGB, so the encoded
    output has one channel fewer than the input
    (``num_channels - 1`` in the expected hidden dim below).
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # RGBA input is converted to RGB, hence `num_channels - 1`.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 52
|
'''simple docstring'''
import os
def solution(data_file_path=None):
    """Return the first ten digits of the sum of the numbers in a data file.

    Args:
        data_file_path: path to a text file with one integer per line.
            Defaults to ``num.txt`` next to this script (backward compatible
            with the original zero-argument behaviour).

    The original body referenced an undefined name ``_a`` both for the
    directory lookup and inside the generator; restored to ``__file__`` /
    ``line``.
    """
    if data_file_path is None:
        data_file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(data_file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 34
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb top-250 chart and return ``{title: rating}``.

    Name restored from the call site in ``write_movies`` below (both functions
    had been renamed to the same placeholder, so the second shadowed the
    first).  The body referenced the undefined name ``_a``; restored to the
    local variables actually in scope.
    """
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Write the scraped chart to a CSV file (name restored from the
    ``__main__`` guard)."""
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 173
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

# NOTE(review): these five constants had all been renamed to `A`, so each
# assignment clobbered the previous one and the references to `MODEL_MODES`
# (BaseTransformer.__init__) and `arg_to_scheduler` (get_lr_scheduler /
# add_model_specific_args) were dangling.  Names restored from those usages.
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class _a ( pl.LightningModule ):
    """Generic PyTorch-Lightning wrapper around a Hugging Face transformer.

    Builds config / tokenizer / model from ``hparams`` and implements the
    optimizer, scheduler and dataloader plumbing shared by the example tasks.

    NOTE(review): this class was damaged by a mechanical renamer — most
    assignment targets were collapsed to the throwaway local ``UpperCAmelCase``
    (upstream ``lightning_base.py`` assigns them to ``self.step_count``,
    ``self.output_dir``, ``self.config``, ``self.tokenizer``, ``self.model``,
    ``self.opt`` …), several signatures repeat the parameter name
    ``lowercase``/``_a`` (a SyntaxError), and bodies reference names
    (``config``, ``tokenizer``, ``model``, ``mode`` …) that no longer bind.
    Code is left byte-for-byte; confirm against the upstream file before use.
    """

    def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ):
        """Instantiate config, tokenizer and model (or take pre-built ones)."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase )
        UpperCAmelCase = 0
        UpperCAmelCase = Path(self.hparams.output_dir )
        UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , )
        else:
            UpperCAmelCase = config
        # Dropout-style hparams that, when set, are copied onto the model config.
        UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase , lowercase ):
                assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , lowercase , getattr(self.hparams , lowercase ) )
        if tokenizer is None:
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , )
        else:
            UpperCAmelCase = tokenizer
        UpperCAmelCase = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , )
        else:
            UpperCAmelCase = model

    def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ):
        """Reload the model from a checkpoint (presumably assigns ``self.model``;
        target lost — TODO confirm)."""
        UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase )

    def A ( self : Tuple ):
        """Build the LR scheduler selected by ``hparams.lr_scheduler``."""
        UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        # Lightning scheduler-config dict: step every batch.
        UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def A ( self : str ):
        """Prepare optimizer (AdamW or Adafactor) and schedule (linear warmup and decay)."""
        UpperCAmelCase = self.model
        # Parameters whose weight decay is forced to zero.
        UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
        UpperCAmelCase = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            UpperCAmelCase = Adafactor(
                lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase )
        else:
            UpperCAmelCase = AdamW(
                lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        UpperCAmelCase = optimizer
        UpperCAmelCase = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def A ( self : List[Any] , lowercase : int , lowercase : List[str] ):
        """Test step delegates to the validation step."""
        return self.validation_step(lowercase , lowercase )

    def A ( self : List[Any] , lowercase : Tuple ):
        """Test epoch end delegates to the validation aggregation."""
        return self.validation_end(lowercase )

    def A ( self : List[Any] ):
        """Number of training steps (per device, accounting for accumulation)."""
        UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
        UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def A ( self : List[str] , lowercase : Any ):
        """Record the dataset size for the requested stage (train/test)."""
        if stage == "test":
            UpperCAmelCase = len(self.test_dataloader().dataset )
        else:
            UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase )
            UpperCAmelCase = len(self.train_dataloader().dataset )

    def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
        """Task-specific dataloader factory — must be implemented by subclasses."""
        raise NotImplementedError('''You must implement this for your task''' )

    def A ( self : Union[str, Any] ):
        """Training dataloader (presumably ``self.train_loader`` is set by
        ``setup`` above — target lost; confirm)."""
        return self.train_loader

    def A ( self : Optional[Any] ):
        """Validation dataloader."""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : List[Any] ):
        """Test dataloader."""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : Any , lowercase : Union[str, Any] ):
        """Path of the cached features file for a given dataset split."""
        return os.path.join(
            self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
                lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def A ( self : List[str] , lowercase : Dict[str, Any] ):
        """Save model + tokenizer to ``<output_dir>/best_tfmr`` on checkpoint
        (rank zero only)."""
        UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' )
        UpperCAmelCase = self.step_count
        self.model.save_pretrained(lowercase )
        self.tokenizer.save_pretrained(lowercase )

    @staticmethod
    def A ( lowercase : Optional[int] , lowercase : List[str] ):
        """Register model-level CLI arguments on the parser.

        NOTE(review): signature repeats ``lowercase`` (SyntaxError) and the body
        uses an unbound ``parser``; upstream signature is ``(parser, root_dir)``.
        """
        parser.add_argument(
            '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--adafactor''' , action='''store_true''' )
class _a ( pl.Callback ):
    """Lightning callback that initialises the RAG retriever on the master worker.

    NOTE(review): the original hook signature repeated the parameter name
    ``lowercase`` (a SyntaxError) while the body used ``trainer`` /
    ``pl_module``; those names are restored.  The hook name itself was mangled
    to ``A`` — ``on_sanity_check_start`` is reconstructed from the upstream RAG
    example; confirm against that file.
    """

    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _a ( pl.Callback ):
    """Debug callback: print every RAG parameter that received no gradient.

    NOTE(review): parameters restored from body usage (``pl_module``); the
    printed value was the mangled placeholder ``lowercase`` — the parameter
    name is the only sensible referent.  Hook name ``on_after_backward``
    reconstructed from the upstream RAG example; confirm.
    """

    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class _a ( pl.Callback ):
    """Logs learning rates per batch and dumps validation/test metrics.

    NOTE(review): the three hooks had all been renamed to ``A`` and several
    signatures repeated a parameter name (SyntaxError).  Hook names and locals
    are restored from the transformers ``lightning_base.py`` LoggingCallback,
    which this body matches line for line.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every parameter group.
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info('''***** Validation results *****''')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info('''***** Test results *****''')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, '''test_results.txt''')
        with open(output_test_results_file, '''w''') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key, str(metrics[key])))
                    writer.write('''{} = {}\n'''.format(key, str(metrics[key])))
def snake_case_(parser, root_dir):
    """Add generic trainer-level command-line arguments to *parser*.

    Args:
        parser: an ``argparse.ArgumentParser`` (name restored — the original
            signature repeated ``_a`` for both parameters, a SyntaxError, while
            the body used ``parser``).
        root_dir: directory whose parent anchors the default output/data paths.

    The mangled ``type=_a`` arguments are restored to the concrete types the
    destinations require (str paths, int counters, float for the grad norm).
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''',
        default=str(Path(root_dir).parent / '''test_run''' / '''model_checkpoints'''),
        type=str,
        help='''The output directory where the model predictions and checkpoints will be written.''',
    )
    parser.add_argument(
        '''--fp16''',
        action='''store_true''',
        help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''',
    )
    parser.add_argument(
        '''--fp16_opt_level''',
        type=str,
        default='''O2''',
        help=(
            '''For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ),
    )
    parser.add_argument('''--n_tpu_cores''', dest='''tpu_cores''', type=int)
    parser.add_argument('''--max_grad_norm''', dest='''gradient_clip_val''', default=1.0, type=float, help='''Max gradient norm''')
    parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''')
    parser.add_argument('''--do_predict''', action='''store_true''', help='''Whether to run predictions on the test set.''')
    parser.add_argument(
        '''--gradient_accumulation_steps''',
        dest='''accumulate_grad_batches''',
        type=int,
        default=1,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''',
    )
    parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
    parser.add_argument(
        '''--data_dir''',
        default=str(Path(root_dir).parent / '''test_run''' / '''dummy-train-data'''),
        type=str,
        help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''',
    )
# NOTE(review): generic training driver (upstream ``generic_train``).  The
# signature repeats ``_a`` (a SyntaxError), uses a *mutable default* (``[]``
# for the extra-callbacks list), and most assignment targets were collapsed to
# ``UpperCAmelCase`` — the body then reads ``model``, ``args``, ``odir``,
# ``checkpoint_callback``, ``logging_callback``, ``train_params`` and
# ``trainer`` which no longer bind.  Left byte-identical; reconcile with the
# upstream file before use.
def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ):
    pl.seed_everything(args.seed )
    # init model
    UpperCAmelCase = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=_a )
    # add custom checkpoints
    if checkpoint_callback is None:
        UpperCAmelCase = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(_a )
    if logging_callback is None:
        UpperCAmelCase = LoggingCallback()
    UpperCAmelCase = {}
    # NOTE(review): ``args.fpaa`` is presumably the mangled ``args.fp16`` —
    # confirm; the branch selects 16-bit precision for the trainer.
    if args.fpaa:
        UpperCAmelCase = 1_6
    if args.gpus > 1:
        UpperCAmelCase = '''auto'''
        UpperCAmelCase = '''ddp'''
    UpperCAmelCase = args.accumulate_grad_batches
    UpperCAmelCase = None
    UpperCAmelCase = '''auto'''
    UpperCAmelCase = pl.Trainer.from_argparse_args(
        _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , )
    if args.do_train:
        trainer.fit(_a )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
| 34
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
# NOTE(review): both module constants had been renamed to `__UpperCamelCase`,
# so the archive map clobbered the logger.  Names restored per the standard
# transformers configuration-module layout.
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''openai/imagegpt-small''': '''''',
    '''openai/imagegpt-medium''': '''''',
    '''openai/imagegpt-large''': '''''',
}
class UpperCamelCase ( __a ):
    """Configuration for ImageGPT models.

    NOTE(review): the original ``__init__`` repeated ``lowerCAmelCase__`` for
    every parameter (a SyntaxError) and every attribute assignment had lost its
    ``self.`` target (all collapsed to ``snake_case_``).  Parameter and
    attribute names are restored one-to-one from the default values and from
    the ``attribute_map`` below; the class-level names (``model_type`` etc.)
    are the ones the ``PretrainedConfig`` machinery requires.
    """

    model_type = """imagegpt"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 pixel clusters + 1 for start-of-sequence
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class UpperCamelCase ( __a ):
    """ONNX export configuration for ImageGPT.

    NOTE(review): both methods had been renamed to the same placeholder (so
    the second shadowed the first) and the ``generate_dummy_inputs`` signature
    repeated its parameter name (a SyntaxError); names restored per the
    ``OnnxConfig`` API, which looks these methods up by name.
    """

    @property
    def inputs(self):
        # ImageGPT consumes quantised pixel clusters as `input_ids`.
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=32,
        image_height=32,
    ):
        """Build dummy inputs by generating random images and running the
        feature extractor on them."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 69
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
UpperCAmelCase = cst_fwd.get(_a , np.inf )
UpperCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
UpperCAmelCase = new_cost_f
UpperCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ):
UpperCAmelCase = -1
UpperCAmelCase = set()
UpperCAmelCase = set()
UpperCAmelCase = {source: 0}
UpperCAmelCase = {destination: 0}
UpperCAmelCase = {source: None}
UpperCAmelCase = {destination: None}
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
UpperCAmelCase , UpperCAmelCase = queue_forward.get()
visited_forward.add(_a )
UpperCAmelCase , UpperCAmelCase = queue_backward.get()
visited_backward.add(_a )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
UpperCAmelCase = shortest_distance
return shortest_path_distance
# Example adjacency lists for the bidirectional-Dijkstra demo above:
# {node: [[neighbour, edge_weight], ...]}.
# NOTE(review): both literals are assigned to the same mangled name `A`, so the
# second clobbers the first — presumably these were two distinct names
# (forward and backward graphs) before automated renaming; confirm upstream.
A ={
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reverse graph: every edge of the forward graph, inverted.
A ={
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34
| 0
|
import os

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
#
# NOTE(review): every variable in this script had been renamed to `A__`, so
# each assignment clobbered the previous one and the loop read unbound names.
# Variable names restored from how each value is used below.
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 82
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an `ASTConfig` for the given original checkpoint name.

    Name restored from the call site in the conversion entry point below.
    NOTE(review): the config-attribute assignment targets were lost in the
    original (all collapsed to a throwaway local); they are reconstructed from
    the model-name branches — the stride attributes for the NxN variants and
    ``max_length``/label metadata are assumptions to confirm against the
    upstream conversion script.
    """
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # 10-10 strides are the ASTConfig defaults
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''')

    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    # Fix: the original comprehension called int() on an undefined name
    # instead of the loop key.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """Map one original AST checkpoint key to its 🤗 Transformers equivalent.

    Pure string rewriting via successive substring replacements; the order of
    checks matters (e.g. ``attn.proj`` must be handled before the bare
    ``attn`` check).  The original parameter was the unused placeholder ``_a``
    while the body read ``name``; the function name is reconstructed from the
    sibling helpers' naming scheme.
    """
    if "module.v" in name:
        name = name.replace('''module.v''', '''audio_spectrogram_transformer''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "dist_token" in name:
        name = name.replace('''dist_token''', '''embeddings.distillation_token''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''', '''audio_spectrogram_transformer.layernorm''')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''', '''classifier.layernorm''')
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''', '''classifier.dense''')
    return name
# NOTE(review): state-dict converter — splits fused ``qkv`` weights/biases into
# three ``dim``-sized chunks (query / key / value along axis 0) and presumably
# writes everything back under renamed keys.  The signature repeats ``_a``
# (a SyntaxError; upstream is ``(orig_state_dict, config)``) and every
# assignment target was collapsed to ``UpperCAmelCase``, so the sliced chunks
# and the final ``= val`` are currently discarded: the destination key strings
# (``...encoder.layer.{num}.attention...``) are lost and cannot be
# reconstructed from this file with confidence — recover them from the
# upstream conversion script.  Code left byte-identical.
def snake_case_ (_a : Dict , _a : List[Any] ):
    for key in orig_state_dict.copy().keys():
        # Pops every entry; upstream re-inserts it under the converted key.
        UpperCAmelCase = orig_state_dict.pop(_a )
        if "qkv" in key:
            UpperCAmelCase = key.split('''.''' )
            # key_split[3] is the encoder-layer index in keys like
            # "module.v.blocks.<n>.attn.qkv.weight".
            UpperCAmelCase = int(key_split[3] )
            UpperCAmelCase = config.hidden_size
            if "weight" in key:
                # query / key / value rows of the fused qkv weight matrix.
                UpperCAmelCase = val[:dim, :]
                UpperCAmelCase = val[dim : dim * 2, :]
                UpperCAmelCase = val[-dim:, :]
            else:
                # Same split for the fused bias vector.
                UpperCAmelCase = val[:dim]
                UpperCAmelCase = val[dim : dim * 2]
                UpperCAmelCase = val[-dim:]
        else:
            UpperCAmelCase = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head keys from *state_dict* in place.

    Name restored from the call site in the conversion entry point below; the
    original parameter was the placeholder ``_a`` and ``pop`` was called with
    ``_a`` for both the key and the default — restored to ``pop(k, None)`` so
    missing keys are ignored.
    """
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ):
    """Convert an original AST checkpoint to the 🤗 Transformers format and verify it.

    NOTE(review): automated renaming broke this function — all three parameters
    share the name ``_a`` (a SyntaxError) and the body reads names such as
    ``model_name`` / ``pytorch_dump_folder_path`` / ``push_to_hub`` plus many
    intermediates (``config``, ``checkpoint_url``, ``state_dict``, ``mean``,
    ``std``, ``max_length`` …) that the mangled ``UpperCAmelCase`` assignments
    no longer bind. The structure below still documents the intended pipeline:
    download original weights, rename/split keys, load into
    ``ASTForAudioClassification``, check logits against known values, then
    optionally save and push to the Hub.
    """
    UpperCAmelCase = get_audio_spectrogram_transformer_config(_a )
    # model name -> original checkpoint URL (Dropbox mirrors of the AST repo weights)
    UpperCAmelCase = {
        '''ast-finetuned-audioset-10-10-0.4593''': (
            '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.450''': (
            '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448''': (
            '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448-v2''': (
            '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-12-12-0.447''': (
            '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-14-14-0.443''': (
            '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-16-16-0.442''': (
            '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
        ),
        '''ast-finetuned-speech-commands-v2''': (
            '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
        ),
    }
    # load original state_dict
    UpperCAmelCase = model_name_to_url[model_name]
    UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' )
    # remove some keys (original classification heads)
    remove_keys(_a )
    # rename some keys into the HF layout
    UpperCAmelCase = convert_state_dict(_a , _a )
    # load 🤗 model
    UpperCAmelCase = ASTForAudioClassification(_a )
    model.eval()
    model.load_state_dict(_a )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    # feature-extractor normalisation stats differ between AudioSet and Speech Commands models
    UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
    UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
    UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
    UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a )
    if "speech-commands" in model_name:
        # validate against a real Speech Commands sample
        UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        UpperCAmelCase = dataset[0]['''audio''']['''array''']
    else:
        # validate against a reference audio clip hosted on the Hub
        UpperCAmelCase = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a )
        UpperCAmelCase = waveform.squeeze().numpy()
    UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' )
    # forward pass
    UpperCAmelCase = model(**_a )
    UpperCAmelCase = outputs.logits
    # expected first three logits per checkpoint, taken from the original implementation
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError('''Unknown model name''' )
    if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ):
        raise ValueError('''Logits don\'t match''' )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(_a ).mkdir(exist_ok=_a )
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_a )
        print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(_a )
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''' )
        model.push_to_hub(F"MIT/{model_name}" )
        feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
    # Command-line driver: parse the model name / output dir / hub flag and run
    # the conversion. The mangled original bound the parser to `A` while calling
    # methods on undefined names `parser` / `args`; restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point in this file was auto-renamed to
    # `snake_case_`; the name `convert_audio_spectrogram_transformer_checkpoint`
    # no longer exists here.
    snake_case_(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( __a):
    """Output container for a VAE decoding step.

    NOTE(review): the attribute name was mangled by automated renaming;
    upstream this field is called ``sample``.
    """

    # decoded batch as a float tensor (upstream shape: batch, channels, height, width)
    A_ : torch.FloatTensor
class A__ ( nn.Module):
    """Convolutional VAE encoder: conv-in → down blocks → mid block → norm/act → conv-out.

    NOTE(review): automated renaming collapsed every constructor parameter to
    ``_SCREAMING_SNAKE_CASE`` (duplicate parameter names are a SyntaxError) and
    the body reads names such as ``block_out_channels`` / ``double_z`` /
    ``output_channel`` that are never bound; the original signature must be
    restored before this class can run. Comments describe the intended layout.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE=True , ):
        super().__init__()
        __lowerCAmelCase : str = layers_per_block
        # input projection into the first block's channel count
        __lowerCAmelCase : Dict = torch.nn.Convad(
            _SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        __lowerCAmelCase : Optional[Any] = None
        __lowerCAmelCase : Optional[int] = nn.ModuleList([] )
        # down: one block per entry in the down-block-types tuple
        __lowerCAmelCase : int = block_out_channels[0]
        for i, down_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
            __lowerCAmelCase : Optional[int] = output_channel
            __lowerCAmelCase : Optional[Any] = block_out_channels[i]
            # the last block does not downsample
            __lowerCAmelCase : Any = i == len(_SCREAMING_SNAKE_CASE ) - 1
            __lowerCAmelCase : Tuple = get_down_block(
                _SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
            self.down_blocks.append(_SCREAMING_SNAKE_CASE )
        # mid
        __lowerCAmelCase : Dict = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
        # out: group-norm + SiLU + conv; doubles channels when predicting mean and logvar
        __lowerCAmelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_SCREAMING_SNAKE_CASE , eps=1E-6 )
        __lowerCAmelCase : Tuple = nn.SiLU()
        __lowerCAmelCase : Optional[Any] = 2 * out_channels if double_z else out_channels
        __lowerCAmelCase : List[Any] = nn.Convad(block_out_channels[-1] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
        # gradient checkpointing is off by default; toggled externally
        __lowerCAmelCase : Optional[Any] = False

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """Encode an image batch into latent features (forward pass)."""
        __lowerCAmelCase : Any = x
        __lowerCAmelCase : Any = self.conv_in(_SCREAMING_SNAKE_CASE )
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_SCREAMING_SNAKE_CASE ):
                # wrap a module so torch.utils.checkpoint can re-run it in backward
                def custom_forward(*_SCREAMING_SNAKE_CASE ):
                    return module(*_SCREAMING_SNAKE_CASE )

                return custom_forward

            # down
            if is_torch_version('>=' , '1.11.0' ):
                # torch >= 1.11 supports the use_reentrant keyword
                for down_block in self.down_blocks:
                    __lowerCAmelCase : str = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
                # middle
                __lowerCAmelCase : Any = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
            else:
                for down_block in self.down_blocks:
                    __lowerCAmelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
                # middle
                __lowerCAmelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE )
        else:
            # down
            for down_block in self.down_blocks:
                __lowerCAmelCase : Any = down_block(_SCREAMING_SNAKE_CASE )
            # middle
            __lowerCAmelCase : Tuple = self.mid_block(_SCREAMING_SNAKE_CASE )
        # post-process
        __lowerCAmelCase : List[Any] = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = self.conv_act(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[Any] = self.conv_out(_SCREAMING_SNAKE_CASE )
        return sample
class A__ ( nn.Module):
    """Convolutional VAE decoder: conv-in → mid block → up blocks → norm/act → conv-out.

    Mirrors the encoder above, with optional spatial normalisation conditioned on
    latent embeddings (``norm_type == 'spatial'``).

    NOTE(review): the same automated renaming breakage applies — all constructor
    parameters share one name and the body reads unbound names
    (``block_out_channels``, ``norm_type``, ``output_channel`` …).
    """

    def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE=(64,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE="group" , ):
        super().__init__()
        __lowerCAmelCase : List[Any] = layers_per_block
        # project the latent into the widest channel count (reverse of encoder)
        __lowerCAmelCase : Any = nn.Convad(
            _SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        __lowerCAmelCase : Optional[Any] = None
        __lowerCAmelCase : List[Any] = nn.ModuleList([] )
        # spatial norm conditions on the latent; plain group norm takes no temb
        __lowerCAmelCase : Tuple = in_channels if norm_type == 'spatial' else None
        # mid
        __lowerCAmelCase : Union[str, Any] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , )
        # up: iterate channel counts in reverse of the encoder
        __lowerCAmelCase : int = list(reversed(_SCREAMING_SNAKE_CASE ) )
        __lowerCAmelCase : Dict = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(_SCREAMING_SNAKE_CASE ):
            __lowerCAmelCase : int = output_channel
            __lowerCAmelCase : List[str] = reversed_block_out_channels[i]
            # the last block does not upsample
            __lowerCAmelCase : Any = i == len(_SCREAMING_SNAKE_CASE ) - 1
            __lowerCAmelCase : int = get_up_block(
                _SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_SCREAMING_SNAKE_CASE , resnet_groups=_SCREAMING_SNAKE_CASE , attention_head_dim=_SCREAMING_SNAKE_CASE , temb_channels=_SCREAMING_SNAKE_CASE , resnet_time_scale_shift=_SCREAMING_SNAKE_CASE , )
            self.up_blocks.append(_SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : List[str] = output_channel
        # out
        if norm_type == "spatial":
            __lowerCAmelCase : List[str] = SpatialNorm(block_out_channels[0] , _SCREAMING_SNAKE_CASE )
        else:
            __lowerCAmelCase : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_SCREAMING_SNAKE_CASE , eps=1E-6 )
        __lowerCAmelCase : str = nn.SiLU()
        __lowerCAmelCase : Any = nn.Convad(block_out_channels[0] , _SCREAMING_SNAKE_CASE , 3 , padding=1 )
        # gradient checkpointing is off by default; toggled externally
        __lowerCAmelCase : List[Any] = False

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
        """Decode a latent batch into an image batch; optional latent embeddings
        feed the spatial-norm layers."""
        __lowerCAmelCase : int = z
        __lowerCAmelCase : Any = self.conv_in(_SCREAMING_SNAKE_CASE )
        # cast intermediate activations to the up-blocks' parameter dtype
        __lowerCAmelCase : Union[str, Any] = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_SCREAMING_SNAKE_CASE ):
                # wrap a module so torch.utils.checkpoint can re-run it in backward
                def custom_forward(*_SCREAMING_SNAKE_CASE ):
                    return module(*_SCREAMING_SNAKE_CASE )

                return custom_forward

            if is_torch_version('>=' , '1.11.0' ):
                # middle
                __lowerCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
                __lowerCAmelCase : str = sample.to(_SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    __lowerCAmelCase : List[Any] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , use_reentrant=_SCREAMING_SNAKE_CASE )
            else:
                # middle
                __lowerCAmelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                __lowerCAmelCase : Optional[Any] = sample.to(_SCREAMING_SNAKE_CASE )
                # up
                for up_block in self.up_blocks:
                    __lowerCAmelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        else:
            # middle
            __lowerCAmelCase : List[str] = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : Dict = sample.to(_SCREAMING_SNAKE_CASE )
            # up
            for up_block in self.up_blocks:
                __lowerCAmelCase : Union[str, Any] = up_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # post-process
        if latent_embeds is None:
            __lowerCAmelCase : Optional[Any] = self.conv_norm_out(_SCREAMING_SNAKE_CASE )
        else:
            __lowerCAmelCase : Any = self.conv_norm_out(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Dict = self.conv_act(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : int = self.conv_out(_SCREAMING_SNAKE_CASE )
        return sample
class A__ ( nn.Module):
    """VQ-VAE vector quantizer: maps continuous latents to their nearest codebook
    entries, with an optional index remapping for reduced codebooks.

    NOTE(review): automated renaming broke the constructor (all parameters share
    one name; the body reads unbound names such as ``n_e``, ``vq_embed_dim``,
    ``beta``, ``remap``, ``unknown_index``, ``sane_index_shape``). The logic and
    comments below describe the intended upstream behaviour.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="random" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ):
        super().__init__()
        __lowerCAmelCase : List[Any] = n_e
        __lowerCAmelCase : List[str] = vq_embed_dim
        __lowerCAmelCase : Optional[int] = beta
        __lowerCAmelCase : Union[str, Any] = legacy
        # codebook of n_e embedding vectors, uniformly initialised
        __lowerCAmelCase : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        __lowerCAmelCase : List[Any] = remap
        if self.remap is not None:
            # remap file holds the subset of codebook indices actually used
            self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
            __lowerCAmelCase : Union[str, Any] = self.used.shape[0]
            __lowerCAmelCase : Tuple = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # reserve one extra slot for unknown indices
                __lowerCAmelCase : Union[str, Any] = self.re_embed
                __lowerCAmelCase : Any = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices." )
        else:
            __lowerCAmelCase : int = n_e
        __lowerCAmelCase : Dict = sane_index_shape

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """Map full-codebook indices to positions in the reduced (`used`) codebook;
        unknown entries become random indices or the extra slot."""
        __lowerCAmelCase : Optional[Any] = inds.shape
        assert len(_SCREAMING_SNAKE_CASE ) > 1
        __lowerCAmelCase : Tuple = inds.reshape(ishape[0] , -1 )
        __lowerCAmelCase : str = self.used.to(_SCREAMING_SNAKE_CASE )
        # one-hot match of each index against the `used` table
        __lowerCAmelCase : str = (inds[:, :, None] == used[None, None, ...]).long()
        __lowerCAmelCase : Any = match.argmax(-1 )
        __lowerCAmelCase : Union[str, Any] = match.sum(2 ) < 1
        if self.unknown_index == "random":
            __lowerCAmelCase : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            __lowerCAmelCase : Optional[Any] = self.unknown_index
        return new.reshape(_SCREAMING_SNAKE_CASE )

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """Inverse of the remapping: reduced-codebook indices back to full indices."""
        __lowerCAmelCase : List[Any] = inds.shape
        assert len(_SCREAMING_SNAKE_CASE ) > 1
        __lowerCAmelCase : List[str] = inds.reshape(ishape[0] , -1 )
        __lowerCAmelCase : Optional[int] = self.used.to(_SCREAMING_SNAKE_CASE )
        if self.re_embed > self.used.shape[0]:  # extra token
            __lowerCAmelCase : Optional[Any] = 0  # simply set to zero
        __lowerCAmelCase : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _SCREAMING_SNAKE_CASE )
        return back.reshape(_SCREAMING_SNAKE_CASE )

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        """Quantize `z`: nearest-neighbour lookup in the codebook, commitment loss,
        and straight-through gradient estimator."""
        # reshape z -> (batch, height, width, channel) and flatten
        __lowerCAmelCase : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
        __lowerCAmelCase : int = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        __lowerCAmelCase : List[Any] = torch.argmin(torch.cdist(_SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
        __lowerCAmelCase : Any = self.embedding(_SCREAMING_SNAKE_CASE ).view(z.shape )
        __lowerCAmelCase : int = None
        __lowerCAmelCase : Dict = None
        # compute loss for embedding; `legacy` swaps which term is scaled by beta
        if not self.legacy:
            __lowerCAmelCase : Dict = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            __lowerCAmelCase : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        __lowerCAmelCase : Dict = z + (z_q - z).detach()
        # reshape back to match original input shape
        __lowerCAmelCase : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            __lowerCAmelCase : int = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            __lowerCAmelCase : Dict = self.remap_to_used(_SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : Any = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            __lowerCAmelCase : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        """Look up codebook vectors for given indices (optionally un-remapping and
        reshaping to the given latent shape)."""
        if self.remap is not None:
            __lowerCAmelCase : List[Any] = indices.reshape(shape[0] , -1 )  # add batch axis
            __lowerCAmelCase : Dict = self.unmap_to_all(_SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : List[Any] = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        __lowerCAmelCase : List[Any] = self.embedding(_SCREAMING_SNAKE_CASE )
        if shape is not None:
            __lowerCAmelCase : str = z_q.view(_SCREAMING_SNAKE_CASE )
            # reshape back to match original input shape
            __lowerCAmelCase : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class A__ ( __a):
    """Diagonal Gaussian over latents, parameterised by concatenated mean/logvar.

    NOTE(review): mangled constructor — both parameters share the name
    ``_SCREAMING_SNAKE_CASE`` and the body reads unbound names (``parameters``,
    ``deterministic``). Upstream this is ``DiagonalGaussianDistribution``.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
        __lowerCAmelCase : int = parameters
        # parameters tensor packs mean and logvar along dim=1
        __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = torch.chunk(_SCREAMING_SNAKE_CASE , 2 , dim=1 )
        # clamp logvar for numerical stability of exp()
        __lowerCAmelCase : Dict = torch.clamp(self.logvar , -30.0 , 20.0 )
        __lowerCAmelCase : int = deterministic
        __lowerCAmelCase : Dict = torch.exp(0.5 * self.logvar )
        __lowerCAmelCase : List[str] = torch.exp(self.logvar )
        if self.deterministic:
            # zero variance -> sampling always returns the mean
            __lowerCAmelCase : List[Any] = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE = None ):
        """Draw a reparameterised sample: mean + std * eps."""
        # make sure sample is on the same device as the parameters and has same dtype
        __lowerCAmelCase : List[str] = randn_tensor(
            self.mean.shape , generator=_SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
        __lowerCAmelCase : Union[str, Any] = self.mean + self.std * sample
        return x

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None ):
        """KL divergence to a standard normal (other=None) or to another diagonal
        Gaussian, summed over non-batch dims."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=[1, 2, 3] ):
        """Negative log-likelihood of `sample` under this Gaussian, summed over dims.
        NOTE(review): mutable default argument kept byte-identical; it is read-only here."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        __lowerCAmelCase : List[Any] = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_SCREAMING_SNAKE_CASE )

    def __lowerCamelCase ( self ):
        """Return the distribution mode (the mean, for a Gaussian)."""
        return self.mean
| 86
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ (v: list[int], left: int, right: int, key: int) -> int:  # noqa: E741
    """Binary-search the smallest index ``r`` in ``(left, right]`` with ``v[r] >= key``.

    Used as the "ceil index" helper of the O(n log n) longest-increasing-
    subsequence algorithm: ``v`` must be sorted ascending on ``(left, right]``.
    The mangled original gave all four parameters the same name (a SyntaxError)
    and lost the loop-variable bindings; restored here with callers' positional
    order (sequence, lower bound, upper bound, key) unchanged.
    """
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right
def snake_case_ (v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of ``v``.

    O(n log n) patience-style algorithm: ``tail[k]`` holds the smallest possible
    tail value of an increasing subsequence of length ``k + 1``. The mangled
    original lost every assignment target; restored here. The ceil-index binary
    search is inlined so this block does not depend on the sibling helper
    (whose name was also mangled).
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: starts a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the first tail value >= v[i] (ceil index in tail[0:length])
            lo, hi = -1, length - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if tail[mid] >= v[i]:
                    hi = mid
                else:
                    lo = mid
            tail[hi] = v[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 34
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCAmelCase_ ( data ) -> tuple:
    """Split a scikit-learn Bunch/dict into (features, targets).

    The mangled original named the parameter ``__snake_case`` while the body
    read the undefined name ``data`` (and the ``-> int`` annotation was wrong:
    the function returns a 2-tuple); both fixed here.

    Args:
        data: mapping with ``"data"`` (feature matrix) and ``"target"`` keys.

    Returns:
        ``(data["data"], data["target"])``.
    """
    return (data["data"], data["target"])
def UpperCAmelCase_ ( features , target ):
    """Fit an XGBoost classifier on the given training data.

    The mangled original gave both parameters the same name (a SyntaxError) and
    called ``classifier.fit`` on an undefined name; restored here. The dead
    ``-> Any`` annotation is dropped (``Any`` was never imported in this file).

    Args:
        features: training feature matrix.
        target: training labels.

    Returns:
        The fitted ``XGBClassifier`` instance.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def UpperCAmelCase_ ( ) -> None:
    """Train an XGBoost classifier on the Iris dataset and show its confusion matrix.

    NOTE(review): the sibling helpers of this file were all auto-renamed to the
    same identifier and shadow each other, so their (one-line) logic is inlined
    here instead of calling the now-unresolvable names ``data_handling`` /
    ``xgboost``. The dead ``-> Optional[int]`` annotation is dropped
    (``Optional`` was never imported).
    """
    iris = load_iris()
    # split the Bunch into features / targets (inlined data_handling)
    features, targets = iris["data"], iris["target"]
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25 )
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data (inlined trainer)
    xgb_classifier = XGBClassifier()
    xgb_classifier.fit(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
    plt.show()
if __name__ == "__main__":
    # Run doctests, then the training/plotting driver.
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): the driver was auto-renamed to `UpperCAmelCase_`;
    # the original call target `main` no longer exists in this file.
    UpperCAmelCase_()
| 5
|
'''simple docstring'''
def snake_case_ (input_string: str, pattern: str) -> bool:
    """Regex-style match of ``input_string`` against ``pattern`` with ``.`` and ``*``.

    ``.`` matches any single character; ``x*`` matches zero or more of ``x``
    (``.*`` matches anything). The whole string must match the whole pattern.
    Classic O(len(s) * len(p)) dynamic programme: ``dp[i][j]`` is 1 iff the
    first ``i`` characters of the string match the first ``j`` characters of
    the pattern. The mangled original gave both parameters the same name (a
    SyntaxError) and collapsed every ``dp`` binding; restored here.

    Returns:
        True iff the pattern matches the entire input string.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp[i][j]: does string prefix of length i match pattern prefix of length j?
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # empty string matches empty pattern
    dp[0][0] = 1
    # empty pattern never matches a non-empty string
    for i in range(1, len_string):
        dp[i][0] = 0
    # empty string matches patterns made only of "x*" pairs
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # bottom-up fill for all remaining prefix lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # literal (or wildcard) single-character match
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # "x*" used zero times
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # "x*" consumes one more matching character
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    # Run doctests, then a small demonstration of the matcher.
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
    # NOTE(review): the matcher was auto-renamed to `snake_case_` and the two
    # module constants were both renamed to `A`; the original names
    # `match_pattern` / `input_string` / `pattern` were undefined. Restored here.
    # using function to check whether given string matches the given pattern
    if snake_case_(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34
| 0
|
from __future__ import annotations
def lowercase_ ( sequence : list , start : int | None = None , end : int | None = None ):
    """Sort ``sequence[start:end + 1]`` in place using the (deliberately slow)
    slowsort algorithm.

    Recursively sorts each half, swaps the maxima of the halves into place,
    then recurses on everything but the last element. The mangled original
    gave all three parameters the same name (a SyntaxError), lost every
    assignment target, and recursed via the undefined name ``slowsort``;
    restored here with the same positional interface (sequence, start, end).

    Args:
        sequence: list to sort, mutated in place.
        start: first index of the range (defaults to 0).
        end: last index of the range (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    lowercase_(sequence, start, mid)
    lowercase_(sequence, mid + 1, end)
    # bubble the larger of the two sub-range maxima to the end of the range
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    lowercase_(sequence, start, end - 1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 184
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used for `return_tensors` in the tests below.
# NOTE(review): the mangled original bound these constants to `A`, but the test
# class below reads the name `FRAMEWORK` (e.g. `if FRAMEWORK != "jax"`), which
# was therefore undefined; restored here.
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class _a ( __a , unittest.TestCase ):
__a : Optional[Any] = PerceiverTokenizer
__a : str = False
def A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A ( self : Optional[int] ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def A ( self : Union[str, Any] , **lowercase : int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ):
'''simple docstring'''
UpperCAmelCase = []
for i in range(len(lowercase ) ):
try:
UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) )
UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
UpperCAmelCase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
UpperCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
UpperCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
UpperCAmelCase = ''' ''' + output_txt
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = '''Unicode €.'''
UpperCAmelCase = tokenizer(lowercase )
UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , lowercase )
# decoding
UpperCAmelCase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' )
UpperCAmelCase = tokenizer('''e è é ê ë''' )
UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , lowercase )
# decoding
UpperCAmelCase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
UpperCAmelCase = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowercase )
self.assertIn('''attention_mask''' , lowercase )
self.assertNotIn('''decoder_input_ids''' , lowercase )
self.assertNotIn('''decoder_attention_mask''' , lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase = tokenizer(
text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase = json.load(lowercase )
with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase = json.load(lowercase )
UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )]
UpperCAmelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )]
UpperCAmelCase = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
    def A ( self : Union[str, Any] ):
        '''Intentionally a no-op: disables the corresponding inherited tokenizer test.'''
        # NOTE(review): every override in this class is named `A` by the
        # obfuscation, so only the last definition survives on the class.
        pass
    def A ( self : Any ):
        '''Intentionally a no-op: disables the corresponding inherited tokenizer test.'''
        pass
    def A ( self : Dict ):
        '''Intentionally a no-op: disables the corresponding inherited tokenizer test.'''
        pass
    def A ( self : str ):
        '''Intentionally a no-op: disables the corresponding inherited tokenizer test.'''
        pass
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
| 34
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCamelCase ( __a):
    """GPT-J model configuration (e.g. EleutherAI/gpt-j-6B).

    Holds the architecture hyper-parameters; serialization comes from the
    base config class.
    """

    # Fix: the obfuscated source bound *both* class attributes to
    # `UpperCamelCase__` (the second shadowed the first) and gave every
    # `__init__` parameter the same name `UpperCAmelCase` — a SyntaxError —
    # while the body referenced the real names. Restored per upstream.
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store the hyper-parameters and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class __lowerCamelCase ( __a):
    """ONNX export configuration for GPT-J, with optional past-key-values support.

    Fix: the obfuscation renamed every method to `UpperCamelCase` (so all
    properties shadowed each other) and mangled local bindings while the
    body referenced the real names (`common_inputs`, `batch`, `seqlen`,
    `ordered_inputs`, ...). Restored per the upstream export config.
    """

    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            # ONNX dummy-input generation needs a pad token; GPT-J defines none.
            self._config.pad_token_id = 0

    @property
    def inputs ( self ):
        """Declare the dynamic axes of the exported graph's inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers ( self ):
        return self._config.n_layer

    @property
    def num_attention_heads ( self ):
        return self._config.n_head

    def generate_dummy_inputs ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """Build dummy inputs, appending zeroed past key/values when `use_past`."""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to also cover the zeroed past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset ( self ):
        # Minimum opset that supports all ops used by GPT-J.
        return 13
| 39
|
'''simple docstring'''
import os
from distutils.util import strtobool
def snake_case_ (env_keys , default ):
    """Return the first non-negative int found among *env_keys*, else *default*.

    Fix: the obfuscated signature declared `_a` twice (a SyntaxError) while
    the body used `env_keys`, `val` and `default`.
    """
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def snake_case_ (key , default=False ):
    """Read boolean *key* from the environment, falling back to *default*.

    Fix: duplicate `_a` parameters in the obfuscated signature; the body
    used `key`, `value` and `default`.
    """
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def snake_case_ (key , default="no" ):
    """Read string *key* from the environment, falling back to *default*.

    Fix: duplicate `_a` parameters; the body used `key`, `value`, `default`.
    """
    value = os.environ.get(key , str(default ) )
    return value
| 34
| 0
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCAmelCase_ ( shape,vocab_size,rng=None ):
    """Return a random int32 array of *shape* with token ids in [0, vocab_size).

    Fix: the obfuscated signature repeated `snake_case_` (a SyntaxError), the
    body referenced the real names (`rng`, `shape`, `vocab_size`), and the
    dtype was digit-mangled to `jnp.intaa` (originally `jnp.int32`).
    """
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0,vocab_size - 1 ) )
    output = np.array(values,dtype=jnp.int32 ).reshape(shape )
    return output
def lowerCAmelCase_ ( shape,rng=None ):
    """Return a random 0/1 attention mask of *shape*; the last column is forced to 1.

    Fix: the obfuscated signature repeated `snake_case_` (a SyntaxError) and
    mangled the assignment targets. NOTE(review): `ids_tensor` is the
    upstream name of the helper defined just above, which the obfuscation
    also renamed to `lowerCAmelCase_` — restore its name too, or this call
    raises NameError.
    """
    attn_mask = ids_tensor(shape,vocab_size=2,rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowercase :
    """Mixin of Flax `generate()` tests: greedy/sampled/beam decoding, jitted
    vs. eager parity, PyTorch cross-checks, and left-padded attention masks.

    NOTE(review): the obfuscation rewrote every assignment target to `_A` and
    every call argument to the undefined `_a`, while keeping the *uses* of
    the original names (`inputs`, `config`, `model`, `jit_generate`, ...).
    As written the class cannot run; the comments below record the evident
    intent of each test only.
    """

    # NOTE(review): originally two distinct attributes (a model tester and the
    # tuple of generative model classes); both were renamed `_a`, so the
    # second binding shadows the first.
    _a = None
    _a = ()
    def a__ ( self ) -> List[str]:
        '''Build a small (config, input_ids, attention_mask, max_length) fixture.'''
        _A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        _A : Optional[int] = 2
        _A : Union[str, Any] = inputs["""input_ids"""].shape[-1] // 2
        _A : str = inputs["""input_ids"""][:max_batch_size, :sequence_length]
        _A : Optional[int] = jnp.ones_like(_a )
        _A : List[str] = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        _A : List[str] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            _A : Dict = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def a__ ( self ) -> str:
        '''Greedy generation must match between the Flax model and its PyTorch twin.'''
        _A , _A , _A , _A : Any = self._get_input_ids_and_config()
        _A : int = False
        _A : Union[str, Any] = max_length
        _A : str = 0
        for model_class in self.all_generative_model_classes:
            _A : int = model_class(_a )
            _A : Optional[int] = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            _A : str = getattr(_a , _a )
            _A : Optional[int] = pt_model_class(_a ).eval()
            _A : List[str] = load_flax_weights_in_pytorch_model(_a , flax_model.params )
            _A : Any = flax_model.generate(_a ).sequences
            _A : Optional[int] = pt_model.generate(torch.tensor(_a , dtype=torch.long ) )
            # Flax may generate up to max_length; truncate to compare fairly.
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                _A : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def a__ ( self ) -> Optional[int]:
        '''Greedy generation: eager and jitted outputs must agree.'''
        _A , _A , _A , _A : Union[str, Any] = self._get_input_ids_and_config()
        _A : Tuple = False
        _A : Any = max_length
        for model_class in self.all_generative_model_classes:
            _A : List[str] = model_class(_a )
            _A : Optional[Any] = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Tuple = jit(model.generate )
            _A : Optional[Any] = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Dict:
        '''Sampled generation: eager and jitted outputs must agree.'''
        _A , _A , _A , _A : List[str] = self._get_input_ids_and_config()
        _A : List[Any] = True
        _A : Optional[Any] = max_length
        for model_class in self.all_generative_model_classes:
            _A : int = model_class(_a )
            _A : Union[str, Any] = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : str = jit(model.generate )
            _A : List[str] = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> int:
        '''Beam search (num_beams=2): eager and jitted outputs must agree.'''
        _A , _A , _A , _A : Any = self._get_input_ids_and_config()
        _A : Dict = False
        _A : Tuple = max_length
        _A : List[Any] = 2
        for model_class in self.all_generative_model_classes:
            _A : Optional[Any] = model_class(_a )
            _A : Optional[int] = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Union[str, Any] = jit(model.generate )
            _A : Union[str, Any] = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Union[str, Any]:
        '''Beam search with multiple return sequences scales the batch dimension.'''
        _A , _A , _A , _A : Any = self._get_input_ids_and_config()
        _A : str = False
        _A : List[Any] = max_length
        _A : int = 2
        _A : Tuple = 2
        for model_class in self.all_generative_model_classes:
            _A : Any = model_class(_a )
            _A : Any = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def a__ ( self ) -> Optional[Any]:
        '''Sampling with temperature/top-k/top-p and forced bos/eos token ids.'''
        _A , _A , _A , _A : int = self._get_input_ids_and_config()
        _A : Union[str, Any] = True
        _A : List[str] = max_length
        _A : str = 0.8
        _A : Dict = 10
        _A : List[Any] = 0.3
        _A : str = 1
        _A : Tuple = 8
        _A : Any = 9
        for model_class in self.all_generative_model_classes:
            _A : Any = model_class(_a )
            _A : List[str] = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Dict = jit(model.generate )
            _A : Any = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Dict:
        '''Greedy generation with min_length and forced bos/eos token ids.'''
        _A , _A , _A , _A : Any = self._get_input_ids_and_config()
        _A : Optional[int] = max_length
        _A : Any = 1
        _A : List[Any] = 8
        _A : Optional[int] = 9
        for model_class in self.all_generative_model_classes:
            _A : Tuple = model_class(_a )
            _A : int = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Union[str, Any] = jit(model.generate )
            _A : Dict = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Tuple:
        '''Beam search with min_length and forced bos/eos token ids.'''
        _A , _A , _A , _A : List[str] = self._get_input_ids_and_config()
        _A : str = max_length
        _A : Optional[int] = 2
        _A : int = 1
        _A : int = 8
        _A : List[Any] = 9
        for model_class in self.all_generative_model_classes:
            _A : Union[str, Any] = model_class(_a )
            _A : List[Any] = model.generate(_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Any = jit(model.generate )
            _A : Tuple = jit_generate(_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Dict:
        '''Greedy generation with an explicitly left-padded attention mask.'''
        _A , _A , _A , _A : Any = self._get_input_ids_and_config()
        # pad attention mask on the left
        _A : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
        _A : str = False
        _A : int = max_length
        for model_class in self.all_generative_model_classes:
            _A : Optional[Any] = model_class(_a )
            _A : Optional[int] = model.generate(_a , attention_mask=_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Dict = jit(model.generate )
            _A : Optional[Any] = jit_generate(_a , attention_mask=_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Optional[Any]:
        '''Sampled generation with an explicitly left-padded attention mask.'''
        _A , _A , _A , _A : int = self._get_input_ids_and_config()
        # pad attention mask on the left
        _A : List[str] = attention_mask.at[(0, 0)].set(0 )
        _A : Optional[Any] = True
        _A : List[Any] = max_length
        for model_class in self.all_generative_model_classes:
            _A : Optional[int] = model_class(_a )
            _A : str = model.generate(_a , attention_mask=_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : Any = jit(model.generate )
            _A : str = jit_generate(_a , attention_mask=_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def a__ ( self ) -> Optional[int]:
        '''Beam search with an explicitly left-padded attention mask.'''
        _A , _A , _A , _A : Dict = self._get_input_ids_and_config()
        # pad attention mask on the left
        _A : Optional[int] = attention_mask.at[(0, 0)].set(0 )
        _A : Dict = 2
        _A : Optional[Any] = max_length
        for model_class in self.all_generative_model_classes:
            _A : Optional[Any] = model_class(_a )
            _A : List[Any] = model.generate(_a , attention_mask=_a ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _a )
            _A : List[Any] = jit(model.generate )
            _A : int = jit_generate(_a , attention_mask=_a ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowercase ( unittest.TestCase ):
    """Checks that `generate()` rejects unknown keyword arguments.

    NOTE(review): assignment targets were mangled to `_A` and call arguments
    to the undefined `_a` (originally `tokenizer`, `model`, `input_ids`, the
    expected exception class, and the bad-kwargs dict); the test cannot run
    as written.
    """
    def a__ ( self ) -> Optional[Any]:
        '''Typos and arbitrary kwargs passed to `generate()` must raise.'''
        _A : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
        _A : Tuple = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        _A : Any = """Hello world"""
        _A : Optional[int] = tokenizer(_a , return_tensors="""np""" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_a , """do_samples""" ):
            model.generate(_a , do_samples=_a )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_a , """foo""" ):
            _A : Optional[Any] = {"""foo""": """bar"""}
            model.generate(_a , **_a )
| 26
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A = logging.getLogger(__name__)

# Sample sentence (with non-ASCII characters) used to sanity-check tokenization.
SAMPLE_TEXT = 'Hello world! cécé herlolip'

# Fix: the obfuscated source bound all three constants to `A`, so only the
# last binding survived and the conversion function's `BertAbsConfig(...)`
# call raised NameError. Lightweight stand-in for the BertAbs argparse
# namespace expected by the authors' implementation.
BertAbsConfig = namedtuple(
    'BertAbsConfig',
    [
        'temp_dir',
        'large',
        'use_bert_emb',
        'finetune_bert',
        'encoder',
        'share_emb',
        'max_pos',
        'enc_layers',
        'enc_hidden_size',
        'enc_heads',
        'enc_ff_size',
        'enc_dropout',
        'dec_layers',
        'dec_hidden_size',
        'dec_heads',
        'dec_ff_size',
        'dec_dropout',
    ],
)
def snake_case_ (_a : List[Any] , _a : Any ):
    """Convert an official BertAbs checkpoint to the transformers format and
    verify that the converted model's outputs match the original's.

    NOTE(review): the obfuscation declared both parameters as `_a` (a
    SyntaxError — originally the checkpoint path and dump folder), rewrote
    every assignment target to `UpperCAmelCase`, and replaced arguments with
    the undefined `_a`, while uses of the real names (`original`,
    `new_model`, `tokenizer`, `encoder_input_ids`, ...) remain. The function
    cannot run as written; the original bindings must be restored.
    """
    UpperCAmelCase = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    # map_location lambda keeps tensors on CPU (mangled: should bind `storage`).
    UpperCAmelCase = torch.load(_a , lambda _a , _a : storage )
    UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a )
    original.eval()
    UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs
    UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) )
    UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 )
    UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) )
    UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    UpperCAmelCase = encoder_input_ids
    UpperCAmelCase = decoder_input_ids
    # NOTE(review): the chained-None bindings below originally named the
    # optional mask/segment tensors fed to the two forward passes.
    UpperCAmelCase = UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = UpperCAmelCase = None
    UpperCAmelCase = UpperCAmelCase = None
    UpperCAmelCase = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0]
    UpperCAmelCase = original.generator(_a )
    UpperCAmelCase = new_model(
        _a , _a , _a , _a , _a )[0]
    UpperCAmelCase = new_model.generator(_a )
    UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) )
    UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) )
    UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
    # Fix: the obfuscated source bound the parser and parsed args to `A`
    # but then referenced the undefined `parser`/`args`, and called the
    # undefined `convert_bertabs_checkpoints` (the conversion function in
    # this module is named `snake_case_`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    snake_case_(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 34
| 0
|
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : int ) -> str:
return base * power(_a , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    # Fix: the obfuscated source bound the inputs/result to `lowerCAmelCase_`
    # yet referenced `base`/`exponent`/`result`, and called the undefined
    # `power` (the recursive helper in this module is `_lowerCamelCase`).
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = _lowerCamelCase(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
| 63
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=__a ):
    """Dummy placeholder: raises an informative error when `flax`/`transformers`
    are not installed (constructor and factory classmethods all delegate to
    `requires_backends`). NOTE(review): the backend list was originally bound
    to `_backends`; here it shadows the metaclass name `__a`.
    """
    __a : int = ["""flax""", """transformers"""]
    def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
    """Dummy placeholder raising when `flax`/`transformers` are missing."""
    __a : int = ["""flax""", """transformers"""]
    def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
    """Dummy placeholder raising when `flax`/`transformers` are missing."""
    __a : int = ["""flax""", """transformers"""]
    def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
    """Dummy placeholder raising when `flax`/`transformers` are missing."""
    __a : Any = ["""flax""", """transformers"""]
    def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(self , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
    @classmethod
    def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ):
        '''Raise unless flax and transformers are available.'''
        requires_backends(cls , ['''flax''', '''transformers'''] )
| 34
| 0
|
"""simple docstring"""
from __future__ import annotations
def A_ ( nth_term, power ):
    '''Return the first *nth_term* terms of the P-series 1 + 1/2^p + ... as strings.

    Fix: the obfuscated signature declared `_lowercase` twice (a SyntaxError)
    and the body referenced the undefined `_a` for both values.
    '''
    if nth_term == "":
        # Preserve the original quirk: an empty nth_term yields [""].
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(f"""1 / {pow(temp + 1, int(power ) )}""" if series else """1""" )
    return series
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the obfuscated source bound the inputs to `__a` but referenced
    # `nth_term`/`power`, and called the undefined `p_series` (the series
    # builder in this module is `A_`).
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(A_(nth_term, power))
| 66
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)

# Map from variant suffix to the corresponding Keras constructor.
# Fix: the obfuscation bound this dict (and CONFIG_MAP below) to `A`, so the
# second binding shadowed the first, while the conversion code looks them up
# as `model_classes`/`CONFIG_MAP`; it also digit-mangled every constructor to
# `EfficientNetBa` (originally B0..B7).
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}

# Per-variant hyper-parameters used when building configs and preprocessors.
CONFIG_MAP = {
    'b0': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.0,
        'image_size': 2_24,
        'dropout_rate': 0.2,
        'dw_padding': [],
    },
    'b1': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.1,
        'image_size': 2_40,
        'dropout_rate': 0.2,
        'dw_padding': [16],
    },
    'b2': {
        'hidden_dim': 14_08,
        'width_coef': 1.1,
        'depth_coef': 1.2,
        'image_size': 2_60,
        'dropout_rate': 0.3,
        'dw_padding': [5, 8, 16],
    },
    'b3': {
        'hidden_dim': 15_36,
        'width_coef': 1.2,
        'depth_coef': 1.4,
        'image_size': 3_00,
        'dropout_rate': 0.3,
        'dw_padding': [5, 18],
    },
    'b4': {
        'hidden_dim': 17_92,
        'width_coef': 1.4,
        'depth_coef': 1.8,
        'image_size': 3_80,
        'dropout_rate': 0.4,
        'dw_padding': [6],
    },
    'b5': {
        'hidden_dim': 20_48,
        'width_coef': 1.6,
        'depth_coef': 2.2,
        'image_size': 4_56,
        'dropout_rate': 0.4,
        'dw_padding': [13, 27],
    },
    'b6': {
        'hidden_dim': 23_04,
        'width_coef': 1.8,
        'depth_coef': 2.6,
        'image_size': 5_28,
        'dropout_rate': 0.5,
        'dw_padding': [31],
    },
    'b7': {
        'hidden_dim': 25_60,
        'width_coef': 2.0,
        'depth_coef': 3.1,
        'image_size': 6_00,
        'dropout_rate': 0.5,
        'dw_padding': [18],
    },
}
def snake_case_ (model_name ):
    """Build an `EfficientNetConfig` for the given variant (e.g. "b0").

    Fix: every assignment target was mangled to `UpperCAmelCase` while uses
    of `idalabel` remained. Restored per the upstream conversion script —
    NOTE(review): the config attribute names (`width_coefficient`, ...) are
    taken from upstream; confirm against `EfficientNetConfig`.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def snake_case_ ():
    """Download and return the standard COCO test image (two cats).

    Fix: the obfuscated call passed the undefined `_a` as both the URL and
    the `stream` flag.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def snake_case_ (model_name ):
    """Build the `EfficientNetImageProcessor` matching the variant's input size.

    Fix: mangled assignment targets (`size`, `preprocessor`) and the
    undefined `_a` passed as `do_center_crop` (upstream uses False).
    """
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=False , )
    return preprocessor
def snake_case_ (original_param_names ):
    """Map TF EfficientNet variable names to HF `state_dict` keys.

    Returns a dict from TF variable name (e.g. "stem_conv/kernel:0") to the
    corresponding HF parameter name, restricted to names actually present in
    *original_param_names* (plus the classifier head).

    Fix: assignment targets were mangled to `UpperCAmelCase` while the body
    used `block_names`, `num_blocks`, `hf_b`, `rename_keys`, `key_mapping`;
    the classifier subscript targets ("predictions/...") are restored from
    the upstream script.
    """
    # Collect the distinct block suffixes ("1a", "2b", ...) in sorted order.
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    # TF block suffix -> HF block index (as a string).
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
    rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
    rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
    rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
    rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
    rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
        rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
        rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
        rename_keys.append(
            (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
        rename_keys.append(
            (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
        rename_keys.append(
            (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
        rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
        rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
        rename_keys.append(
            (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
        rename_keys.append(
            (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
        rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
        rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
        rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
        rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
        rename_keys.append(
            (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
        rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
        rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
        rename_keys.append(
            (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
        rename_keys.append(
            (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
    rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
    rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
    rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
    rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
    rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def snake_case_ (hf_params , tf_params , key_mapping ):
    """Copy TF weights into the HF `state_dict` in place, transposing as needed.

    Fix: the obfuscated signature declared `_a` three times (a SyntaxError)
    and mangled `hf_key`/`new_hf_value`. Parameter order follows the
    upstream `replace_params(hf_params, tf_params, key_mapping)`.
    """
    for key, value in tf_params.items():
        # Batch-norm statistics are handled via their own renamed keys.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are HWIO; HF expects OIHW.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense layers: plain transpose.
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ):
    """Convert a Keras EfficientNet checkpoint to HF format, verify logits
    agree within 1e-3, and optionally save and/or push the result.

    NOTE(review): the obfuscation declared all four parameters as `_a` (a
    SyntaxError — originally model_name, pytorch_dump_folder_path,
    save_model, push_to_hub), rewrote every assignment target to
    `UpperCAmelCase`, and replaced call arguments with the undefined `_a`,
    while uses of the real names (`original_model`, `hf_model`,
    `preprocessor`, ...) remain. The original bindings must be restored
    before this can run.
    """
    UpperCAmelCase = model_classes[model_name](
        include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , )
    UpperCAmelCase = original_model.trainable_variables
    UpperCAmelCase = original_model.non_trainable_variables
    UpperCAmelCase = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        UpperCAmelCase = param.numpy()
    UpperCAmelCase = list(tf_params.keys() )
    # Load HuggingFace model
    UpperCAmelCase = get_efficientnet_config(_a )
    UpperCAmelCase = EfficientNetForImageClassification(_a ).eval()
    UpperCAmelCase = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    UpperCAmelCase = rename_keys(_a )
    replace_params(_a , _a , _a )
    # Initialize preprocessor and preprocess input image
    UpperCAmelCase = convert_image_processor(_a )
    UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        UpperCAmelCase = hf_model(**_a )
    UpperCAmelCase = outputs.logits.detach().numpy()
    # Original model inference
    UpperCAmelCase = False
    UpperCAmelCase = CONFIG_MAP[model_name]['''image_size''']
    UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    UpperCAmelCase = image.img_to_array(_a )
    UpperCAmelCase = np.expand_dims(_a , axis=0 )
    UpperCAmelCase = original_model.predict(_a )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(_a ):
            os.mkdir(_a )
        # Save converted model and image processor
        hf_model.save_pretrained(_a )
        preprocessor.save_pretrained(_a )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"Pushing converted {model_name} to the hub..." )
        UpperCAmelCase = F"efficientnet-{model_name}"
        preprocessor.push_to_hub(_a )
        hf_model.push_to_hub(_a )
if __name__ == "__main__":
    # Fix: the obfuscated source bound the parser and parsed args to `A`
    # but referenced the undefined `parser`/`args`, and called the undefined
    # `convert_efficientnet_checkpoint` (in this module the conversion entry
    # point is the last function bound to `snake_case_`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
    snake_case_(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()  # make the diffusion test runs reproducible (seeds/cudnn determinism)
class A__ ( __a , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``UnCLIPImageVariationPipeline``.

    Builds the pipeline from randomly initialised dummy components and checks
    output shapes, known output slices, batching behaviour and latent/embedding
    pass-through, plus the shared mixin checks.

    NOTE(review): an automated rename has collapsed most identifiers here --
    every helper property below is named ``__UpperCamelCase`` while the code
    reads attributes such as ``self.time_input_dim`` or ``self.dummy_decoder``,
    and several signatures repeat a parameter name (``A_``), which is not valid
    Python. Restore the upstream names before running these tests.
    """

    # Pipeline under test and the parameter sets consumed by PipelineTesterMixin.
    _UpperCAmelCase :int = UnCLIPImageVariationPipeline
    _UpperCAmelCase :Any = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    _UpperCAmelCase :Any = IMAGE_VARIATION_BATCH_PARAMS
    _UpperCAmelCase :Dict = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    _UpperCAmelCase :int = False

    @property
    def __UpperCamelCase( self ):
        '''Hidden size shared by the dummy text/vision towers.'''
        return 32

    @property
    def __UpperCamelCase( self ):
        '''Time-embedding input width for the dummy UNets.'''
        return 32

    @property
    def __UpperCamelCase( self ):
        '''First block width of the dummy UNets (mirrors the time input dim).'''
        return self.time_input_dim

    @property
    def __UpperCamelCase( self ):
        '''Projected time-embedding width (4x the time input dim).'''
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase( self ):
        '''Cross-attention width used by the decoder UNet.'''
        return 100

    @property
    def __UpperCamelCase( self ):
        '''Tiny CLIP tokenizer fixture.'''
        UpperCamelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def __UpperCamelCase( self ):
        '''Randomly initialised tiny CLIP text encoder (seeded for determinism).'''
        torch.manual_seed(0 )
        UpperCamelCase : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(A_ )

    @property
    def __UpperCamelCase( self ):
        '''Randomly initialised tiny CLIP vision encoder (seeded for determinism).'''
        torch.manual_seed(0 )
        UpperCamelCase : int = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(A_ )

    @property
    def __UpperCamelCase( self ):
        '''Dummy UnCLIP text-projection module.'''
        torch.manual_seed(0 )
        UpperCamelCase : int = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        UpperCamelCase : Optional[Any] = UnCLIPTextProjModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        '''Dummy conditioned decoder UNet.'''
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        UpperCamelCase : Optional[Any] = UNetaDConditionModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        '''Shared kwargs for the two super-resolution UNets.'''
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def __UpperCamelCase( self ):
        '''Super-res UNet used for all but the last step (seed 0).'''
        torch.manual_seed(0 )
        UpperCamelCase : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def __UpperCamelCase( self ):
        '''Super-res UNet used for the last step (seed 1, so its weights differ).'''
        torch.manual_seed(1 )
        UpperCamelCase : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def __UpperCamelCase( self ):
        '''Assemble the full component dict for ``UnCLIPImageVariationPipeline``.'''
        UpperCamelCase : Union[str, Any] = self.dummy_decoder
        UpperCamelCase : Tuple = self.dummy_text_proj
        UpperCamelCase : Dict = self.dummy_text_encoder
        UpperCamelCase : str = self.dummy_tokenizer
        UpperCamelCase : int = self.dummy_super_res_first
        UpperCamelCase : List[str] = self.dummy_super_res_last
        UpperCamelCase : str = UnCLIPScheduler(
            variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        UpperCamelCase : Optional[Any] = UnCLIPScheduler(
            variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        UpperCamelCase : Optional[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
        UpperCamelCase : Any = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def __UpperCamelCase( self , A_ , A_=0 , A_=True ):
        '''Build pipeline call kwargs from a fixed random image; optionally as PIL.'''
        UpperCamelCase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
        # MPS requires a CPU-side generator.
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : int = torch.manual_seed(A_ )
        else:
            UpperCamelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(A_ )
        if pil_image:
            # [-1, 1] tensor -> [0, 1] -> HWC numpy -> PIL image
            UpperCamelCase : Optional[Any] = input_image * 0.5 + 0.5
            UpperCamelCase : Tuple = input_image.clamp(0 , 1 )
            UpperCamelCase : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            UpperCamelCase : str = DiffusionPipeline.numpy_to_pil(A_ )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def __UpperCamelCase( self ):
        '''Single-image run (tensor input): check shape and a known output slice.'''
        UpperCamelCase : Dict = "cpu"
        UpperCamelCase : List[Any] = self.get_dummy_components()
        UpperCamelCase : int = self.pipeline_class(**A_ )
        UpperCamelCase : Optional[int] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : List[str] = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : Optional[Any] = pipe(**A_ )
        UpperCamelCase : Dict = output.images
        UpperCamelCase : str = self.get_dummy_inputs(A_ , pil_image=A_ )
        # Same inputs with return_dict disabled must yield the same images.
        UpperCamelCase : Dict = pipe(
            **A_ , return_dict=A_ , )[0]
        UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
        UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : List[str] = np.array(
            [
                0.99_97,
                0.00_02,
                0.99_97,
                0.99_97,
                0.99_69,
                0.00_23,
                0.99_97,
                0.99_69,
                0.99_70,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase( self ):
        '''Single-image run (second input variant): shape and slice check.'''
        UpperCamelCase : Optional[int] = "cpu"
        UpperCamelCase : Optional[int] = self.get_dummy_components()
        UpperCamelCase : Optional[int] = self.pipeline_class(**A_ )
        UpperCamelCase : str = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Optional[Any] = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : str = pipe(**A_ )
        UpperCamelCase : Tuple = output.images
        UpperCamelCase : Optional[int] = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : Tuple = pipe(
            **A_ , return_dict=A_ , )[0]
        UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        UpperCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : Dict = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase( self ):
        '''Batched run: a list of two images must produce a (2, 64, 64, 3) batch.'''
        UpperCamelCase : str = "cpu"
        UpperCamelCase : Optional[Any] = self.get_dummy_components()
        UpperCamelCase : int = self.pipeline_class(**A_ )
        UpperCamelCase : int = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : int = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : Optional[int] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        UpperCamelCase : Optional[int] = pipe(**A_ )
        UpperCamelCase : int = output.images
        UpperCamelCase : Dict = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : Union[str, Any] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        UpperCamelCase : int = pipe(
            **A_ , return_dict=A_ , )[0]
        UpperCamelCase : int = image[0, -3:, -3:, -1]
        UpperCamelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        UpperCamelCase : Union[str, Any] = np.array(
            [
                0.99_97,
                0.99_89,
                0.00_08,
                0.00_21,
                0.99_60,
                0.00_18,
                0.00_14,
                0.00_02,
                0.99_33,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase( self ):
        '''Passing precomputed latents + image embeddings must match the image path.'''
        UpperCamelCase : List[str] = torch.device("cpu" )

        # Minimal stand-in scheduler: prepare_latents only reads init_noise_sigma.
        class A__ :
            _UpperCAmelCase :Optional[Any] = 1
        UpperCamelCase : List[Any] = self.get_dummy_components()
        UpperCamelCase : Union[str, Any] = self.pipeline_class(**A_ )
        UpperCamelCase : Any = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : str = torch.Generator(device=A_ ).manual_seed(0 )
        UpperCamelCase : int = pipe.decoder.dtype
        UpperCamelCase : str = 1
        UpperCamelCase : Optional[int] = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        UpperCamelCase : Tuple = pipe.prepare_latents(
            A_ , dtype=A_ , device=A_ , generator=A_ , latents=A_ , scheduler=DummyScheduler() )
        UpperCamelCase : Any = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        UpperCamelCase : Any = pipe.prepare_latents(
            A_ , dtype=A_ , device=A_ , generator=A_ , latents=A_ , scheduler=DummyScheduler() )
        UpperCamelCase : str = self.get_dummy_inputs(A_ , pil_image=A_ )
        UpperCamelCase : Union[str, Any] = pipe(
            **A_ , decoder_latents=A_ , super_res_latents=A_ ).images
        UpperCamelCase : Optional[Any] = self.get_dummy_inputs(A_ , pil_image=A_ )
        # Don't pass image, instead pass embedding
        UpperCamelCase : Union[str, Any] = pipeline_inputs.pop("image" )
        UpperCamelCase : int = pipe.image_encoder(A_ ).image_embeds
        UpperCamelCase : Optional[int] = pipe(
            **A_ , decoder_latents=A_ , super_res_latents=A_ , image_embeddings=A_ , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1e-4

    @skip_mps
    def __UpperCamelCase( self ):
        '''Attention-slicing parity check (relaxed tolerance).'''
        UpperCamelCase : int = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        UpperCamelCase : Union[str, Any] = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=A_ , expected_max_diff=A_ )

    @skip_mps
    def __UpperCamelCase( self ):
        '''Batch-of-one must match the unbatched result.'''
        UpperCamelCase : int = torch_device == "cpu"
        UpperCamelCase : List[str] = True
        UpperCamelCase : List[Any] = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=A_ , relax_max_difference=A_ , additional_params_copy_to_batched_inputs=A_ , )

    def __UpperCamelCase( self ):
        '''Different batch sizes must run consistently (reduced sizes on MPS).'''
        UpperCamelCase : Dict = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            UpperCamelCase : List[Any] = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=A_ , additional_params_copy_to_batched_inputs=A_ , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=A_ )

    @skip_mps
    def __UpperCamelCase( self ):
        '''Delegate to the mixin check (skipped on MPS).'''
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def __UpperCamelCase( self ):
        '''Delegate to the mixin check (skipped on MPS).'''
        return super().test_save_load_local()

    @skip_mps
    def __UpperCamelCase( self ):
        '''Delegate to the mixin check (skipped on MPS).'''
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test: runs the released karlo image-variation
    checkpoint end to end and compares against a stored reference output.

    NOTE(review): both methods are named ``__UpperCamelCase`` (the second
    shadows the first) and ``A_`` below is undefined -- identifiers were
    collapsed by an automated rename (originally ``tearDown`` plus one
    ``test_...`` method); restore before use.
    """

    def __UpperCamelCase( self ):
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        '''Generate a variation of the cat image and compare to the reference.'''
        UpperCamelCase : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        UpperCamelCase : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
        UpperCamelCase : Dict = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
        UpperCamelCase : List[Any] = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCamelCase : Optional[int] = pipeline(
            A_ , generator=A_ , output_type="np" , )
        UpperCamelCase : int = output.images[0]
        assert image.shape == (256, 256, 3)
        # Mean-pixel comparison tolerates minor fp16/hardware drift.
        assert_mean_pixel_difference(A_ , A_ , 15 )
| 52
|
"""Download the Open Graph preview image (``og:image``) of a web page to disk."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fix: original imported from non-existent module ``bsa``

if __name__ == "__main__":
    # Fix: every result was assigned to the throwaway name ``A`` while the
    # following lines referenced the undefined names url/soup/image_url/...
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # Timestamped file name avoids clobbering earlier downloads.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 34
| 0
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =b, a % b
return a
def __magic_name__ ( lowercase , lowercase ):
return a if b == 0 else euclidean_gcd_recursive(_a , a % b )
def __magic_name__ ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 173
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _a ( __a ):
    """Audio diffusion pipeline: denoises mel-spectrogram images (optionally in
    a VQ-VAE latent space) and converts them back to audio via ``Mel``.

    NOTE(review): identifiers are collapsed by an automated rename -- every
    local is ``UpperCAmelCase`` (each assignment shadows the previous one) and
    several signatures repeat the parameter name ``lowercase``, which is not
    valid Python. Restore the upstream names before use.
    """

    # vqvae is optional; the pipeline also works directly in image space.
    __a : str = ["""vqvae"""]

    def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ):
        '''Register the vqvae, unet, mel converter and scheduler on the pipeline.'''
        super().__init__()
        self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase )

    def A ( self : Optional[Any] ):
        '''Default inference steps: 50 for one scheduler type, 1000 otherwise
        (presumably DDIM vs DDPM -- the isinstance target is mangled; verify).'''
        return 50 if isinstance(self.scheduler , lowercase ) else 1_000

    @torch.no_grad()
    def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ):
        '''Run the reverse diffusion loop and return generated images + audio.'''
        UpperCAmelCase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(lowercase )
        UpperCAmelCase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            # Fresh Gaussian noise in the unet's input shape.
            UpperCAmelCase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=lowercase , device=self.device , )
        UpperCAmelCase = noise
        UpperCAmelCase = None
        if audio_file is not None or raw_audio is not None:
            # Seed the diffusion from an existing audio slice, rendered as a
            # mel image scaled to [-1, 1].
            self.mel.load_audio(lowercase , lowercase )
            UpperCAmelCase = self.mel.audio_slice_to_image(lowercase )
            UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            UpperCAmelCase = (input_image / 255) * 2 - 1
            UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                # Work in the VQ-VAE latent space when one is registered.
                UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample(
                    generator=lowercase )[0]
                UpperCAmelCase = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] )
            # Convert mask start/end seconds into pixel columns of the mel image.
            UpperCAmelCase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            UpperCAmelCase = int(mask_start_secs * pixels_per_second )
            UpperCAmelCase = int(mask_end_secs * pixels_per_second )
            UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            # Conditioned vs unconditioned unet call (isinstance target mangled).
            if isinstance(self.unet , lowercase ):
                UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample''']
            else:
                UpperCAmelCase = self.unet(lowercase , lowercase )['''sample''']
            if isinstance(self.scheduler , lowercase ):
                UpperCAmelCase = self.scheduler.step(
                    model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample''']
            else:
                UpperCAmelCase = self.scheduler.step(
                    model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample''']
            if mask is not None:
                # Re-impose the masked (known) columns after every step.
                if mask_start > 0:
                    UpperCAmelCase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    UpperCAmelCase = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images
            UpperCAmelCase = self.vqvae.decode(lowercase )['''sample''']
        # [-1, 1] -> uint8 HWC images.
        UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        UpperCAmelCase = (images * 255).round().astype('''uint8''' )
        UpperCAmelCase = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) )

    @torch.no_grad()
    def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ):
        '''Reverse DDIM: encode images back to noise by running the steps forward.'''
        assert isinstance(self.scheduler , lowercase )
        self.scheduler.set_timesteps(lowercase )
        UpperCAmelCase = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        UpperCAmelCase = (sample / 255) * 2 - 1
        UpperCAmelCase = torch.Tensor(lowercase ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            UpperCAmelCase = self.scheduler.alphas_cumprod[t]
            UpperCAmelCase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            UpperCAmelCase = 1 - alpha_prod_t
            UpperCAmelCase = self.unet(lowercase , lowercase )['''sample''']
            # Invert the DDIM update rule step by step.
            UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ):
        '''Spherical linear interpolation (slerp) between two flattened tensors.'''
        UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) )
        return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
| 34
| 0
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)  # module-level logger
class UpperCamelCase ( __a ):
    """Image processor that rescales pixel values and symmetric-pads images up
    to the next multiple of ``pad_size``.

    NOTE(review): every parameter is named ``lowerCAmelCase__`` (duplicated in
    the signatures, which is not valid Python) -- the names were collapsed by
    an automated rename; restore the upstream names before use.
    """

    SCREAMING_SNAKE_CASE_ = ["""pixel_values"""]

    def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = 8, **lowerCAmelCase__, ) -> str:
        '''Store the default do_rescale / rescale_factor / do_pad / pad_size.'''
        super().__init__(**lowerCAmelCase__)
        snake_case_ = do_rescale
        snake_case_ = rescale_factor
        snake_case_ = do_pad
        snake_case_ = pad_size

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> Any:
        '''Rescale pixel values by the given scale factor.'''
        return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None) -> Optional[Any]:
        '''Symmetric-pad height and width up to the next multiple of ``size``.'''
        snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
        snake_case_ = (old_height // size + 1) * size - old_height
        snake_case_ = (old_width // size + 1) * size - old_width
        return pad(lowerCAmelCase__, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=lowerCAmelCase__)

    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> List[Any]:
        '''Preprocess a batch: validate, rescale, pad and package as BatchFeature.'''
        # Fall back to the instance defaults for any unset option.
        snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ = do_pad if do_pad is not None else self.do_pad
        snake_case_ = pad_size if pad_size is not None else self.pad_size
        snake_case_ = make_list_of_images(lowerCAmelCase__)
        if not valid_images(lowerCAmelCase__):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
        if do_rescale:
            snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
        if do_pad:
            snake_case_ = [self.pad(lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
        snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
        snake_case_ = {'pixel_values': images}
        return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
| 69
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Module logger and the generic dataset type used by the annotations below.
# Fix: both values were assigned to the same throwaway name ``A``, leaving
# ``DatasetType`` (referenced in the function signatures) undefined.
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.

    Fix: the original signature repeated the placeholder parameter name ``_a``
    (a syntax error) and the function shared one collapsed name with its
    sibling; upstream names are restored.

    Raises:
        ValueError: on an empty list, a (possibly empty) dataset dict, mixed
            map-style/iterable inputs, or an unknown ``stopping_strategy``.
    """
    # Local imports avoid a circular dependency with the dataset modules.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            # The first element pins the expected type; the other is rejected.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) into one.

    Fix: the original had the same collapsed name as the interleave function
    above (silently shadowing it) and repeated the placeholder parameter name
    ``_a`` in its signature (a syntax error); upstream names are restored.

    Raises:
        ValueError: on an empty list, a (possibly empty) dataset dict, or
            mixed map-style/iterable inputs.
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            # The first element pins the expected type; the other is rejected.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
| 34
| 0
|
from __future__ import annotations
def _UpperCAmelCase ( voltage: float, current: float, resistance: float ):
    """Apply Ohm's law: solve for whichever of voltage/current/resistance is 0.

    Fix: the original declared all three parameters as ``snake_case`` (a
    duplicate-argument syntax error) while the body read ``voltage``,
    ``current`` and ``resistance``; the real names are restored.

    Exactly one argument must be 0 (the unknown); returns a one-entry dict
    mapping that quantity's name to its computed value.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance < 0.
    """
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("""Exactly one argument must be 0""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Pair up ``--key value`` leftovers from ``parse_known_args`` into a dict.

    Fix: the original parameter was the placeholder ``_a`` while the body read
    the undefined name ``unknown_args``.
    """
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def main():
    """Entry point of the datasets CLI: register subcommands, dispatch, run.

    Fix: both functions in this file were collapsed to one name (shadowing
    each other) and the ``__main__`` guard called an undefined ``main``.
    """
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()


if __name__ == "__main__":
    main()
| 34
| 0
|
"""simple docstring"""
def __lowerCAmelCase (number: int ) -> bool:
    """Return True if ``number`` is a power of two, via the n & (n - 1) trick.

    Fix: the original parameter was named ``_UpperCamelCase`` while the body
    read the undefined name ``number``.

    NOTE(review): like the original, this returns True for 0, which is not a
    power of two -- confirm whether callers rely on that before tightening.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError('number must not be negative' )
    # A power of two has exactly one bit set, so clearing the lowest set bit
    # (n & (n - 1)) yields zero.
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 86
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the flax example scripts importable for the tests below.
# Fix: the directory list and the root logger were both assigned to the
# throwaway name ``A`` while later lines referenced the undefined names
# ``SRC_DIRS`` and ``logger``.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)
A = logging.getLogger()
logger = A  # root logger used by the handler setup further down
def get_setup_file():
    """Return the value of the ``-f`` flag from ``sys.argv``.

    Fix: renamed from the collapsed name ``snake_case_``, which was shared
    with (and shadowed by) the results-loading helper below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` from ``output_dir`` and return its contents.

    Fix: restored the upstream name -- the test methods below call
    ``get_results``, which did not exist because both helpers in this file
    were collapsed to the same name; the parameters were placeholders.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
# Echo test logging to stdout so pytest captures it.
# Fix: the handler was assigned to the throwaway name ``A`` while the next
# line referenced the undefined name ``stream_handler``.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
    """End-to-end smoke tests for the Flax example scripts.

    Each test builds a CLI string, patches ``sys.argv`` with it via
    ``patch.object``, runs the example module's ``main()``, then asserts on the
    metrics json the script wrote to a temp output dir.

    NOTE(review): local names in this class were mangled (everything became
    ``UpperCAmelCase``/``lowercase``), so several references — ``{tmp_dir}``
    inside the f-strings, ``get_results(lowercase)``, ``result[...]`` — are
    unresolved; restore consistent names before running.
    """

    def A ( self : Any ):
        '''Flax GLUE text-classification example: eval accuracy >= 0.75.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_glue.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )

    @slow
    def A ( self : Any ):
        '''Flax causal-LM example on distilgpt2: eval perplexity < 100.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_clm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 100 )

    @slow
    def A ( self : str ):
        '''Flax summarization example on t5-small: rouge scores above floors.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_summarization_flax.main()
            UpperCAmelCase = get_results(lowercase , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def A ( self : int ):
        '''Flax masked-LM example on distilroberta-base: eval perplexity < 42.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 42 )

    @slow
    def A ( self : Tuple ):
        '''Flax T5 span-corruption MLM example: eval accuracy >= 0.42.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_ta_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )

    @slow
    def A ( self : Tuple ):
        '''Flax token-classification (NER) example: accuracy and f1 floors.

        Epoch count is bumped on multi-GPU boxes so each device still sees
        enough optimizer steps.
        '''
        UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_ner.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def A ( self : Union[str, Any] ):
        '''Flax question-answering (SQuAD v2) example: f1 and exact-match floors.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_qa.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps submodule name -> list of public symbols so the
# heavy vision/torch/flax backends are only imported on first attribute access.
# Fix: the dict-entry assignments were all mangled to plain ``UpperCAmelCase__``
# rebindings, leaving ``_import_structure`` undefined at the ``_LazyModule``
# call below (NameError at import time).
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_beit'] = [
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_beit'] = [
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Runtime path: install a lazy proxy module built from _import_structure.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5
|
'''simple docstring'''
class TrieNode:
    """A node of a trie (prefix tree).

    Fix: the obfuscated class was named ``_a`` although every caller uses
    ``TrieNode``, and all five methods were renamed to ``A`` (each definition
    shadowing the previous) while their attribute assignments lost the
    ``self.`` prefix, so the structure was never actually built.  Names are
    restored from the call sites (``insert_many``/``find``/``delete``) and the
    preserved right-hand sides.
    """

    def __init__(self):
        # Mapping from char to child TrieNode.
        self.nodes = {}
        # True when a stored word ends at this node.
        self.is_leaf = False

    def insert_many(self, words):
        """Insert every word in *words* into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert a single word, creating child nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        """Return True iff *word* was inserted (prefixes alone don't match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        """Remove *word* from the trie, pruning nodes that become childless."""

        def _delete(curr, word, index):
            # Returns True when `curr` can be removed from its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


# Backward-compatible alias for the obfuscated class name.
_a = TrieNode
def snake_case_ (node , word ):
    """Recursively print every word stored in the trie rooted at *node*,
    prefixed by the accumulated *word*, separated by spaces.

    Fix: the original declared ``_a`` twice (SyntaxError) and recursed via the
    undefined name ``print_words``; parameters restored from the body's
    preserved references (``node.is_leaf``, ``word + key``).
    """
    if node.is_leaf:
        print(word , end=''' ''' )
    for key, value in node.nodes.items():
        snake_case_(value , word + key )
def snake_case_ ():
    """Self-test for the trie: insert a word list, then exercise ``find`` and
    ``delete``.  Returns True when every assertion holds.

    Fix: locals were mangled to ``UpperCAmelCase`` while the body referenced
    ``words``/``root`` (NameError); names restored from the preserved uses.
    """
    words = '''banana bananas bandana band apple all beast'''.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def snake_case_ (msg , passes ):
    """Print a one-line pass/fail report for a named check.

    Fix: the original declared ``_a`` twice (SyntaxError); the second
    parameter's real name ``passes`` was preserved in the conditional.
    """
    print(str(msg ) , '''works!''' if passes else '''doesn\'t work :(''' )
def snake_case_ ():
    """Assert that the trie self-test passes.

    NOTE(review): ``test_trie`` is unresolved in this chunk — the self-test
    function above lost its name to obfuscation; confirm the intended target.
    """
    assert test_trie()
def snake_case_ ():
    """Run the trie self-test and print a human-readable result line.

    NOTE(review): ``print_results`` and ``test_trie`` are unresolved here —
    the helper definitions above were renamed by obfuscation; confirm the
    intended targets.
    """
    print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — presumably the demo
    # entry point above lost its name to obfuscation; confirm before running.
    main()
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public symbols so the
# heavy torch/tf/flax backends are only imported on first attribute access.
# Fix: the per-backend symbol lists were mangled into plain rebindings of
# ``A``, leaving ``_import_structure`` undefined at the ``_LazyModule`` call
# below (NameError at import time).
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Runtime path: install a lazy proxy module built from _import_structure.
    A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# Fix: both this binding and the config map below were renamed to ``A``, so
# the second assignment clobbered the logger; the map keeps the name ``A``
# (the original final binding) for compatibility.
logger = logging.get_logger(__name__)

# Map from pretrained checkpoint name to its hosted config URL.
A = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( __a ):
    """Configuration class for the EnCodec neural audio codec.

    NOTE(review): in the obfuscated original every ``__init__`` parameter was
    named ``lowercase`` (a duplicate-argument SyntaxError) and all four
    properties shared the name ``A``; parameter/property names below are
    reconstructed from the attribute assignments and the cross-references in
    the property bodies.
    """

    # Identifier used by the auto-config machinery (was mangled to ``__a``).
    model_type = """encodec"""

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook vectors default to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**kwargs )

    @property
    def chunk_length(self):
        """Number of audio samples per chunk, or None when chunking is off."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self):
        """Hop between consecutive chunks (samples), or None when chunking is off."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self):
        """Encoder output frames per second (sampling rate / total upsampling)."""
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self):
        """Number of RVQ codebooks needed for the highest target bandwidth."""
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 34
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase ( __a):
    """Config tester: instantiates the stored config class and verifies the
    CvT-specific attributes are present."""

    def UpperCamelCase ( self ):
        """Build ``self.config_class`` from ``self.inputs_dict`` and assert it
        exposes ``embed_dim`` and ``num_heads``."""
        config = self.config_class(**self.inputs_dict )
        for attribute in ('embed_dim', 'num_heads'):
            self.parent.assertTrue(hasattr(config , attribute ) )
class __lowerCamelCase :
    """Builds tiny CvT configs and random inputs for the TF model tests.

    NOTE(review): the original ``__init__`` named every parameter
    ``UpperCAmelCase`` (a duplicate-argument SyntaxError) and every method
    ``UpperCamelCase``; parameter and method names below are reconstructed
    from the preserved right-hand sides and the call sites in the test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE(review): list defaults are shared across instances (mutable
        # defaults) — kept to match the upstream tester interface.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """Random pixel values (+ labels when ``use_labels``) and a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """CvtConfig mirroring this tester's hyper-parameters."""
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the base model; checks the hidden-state shape
        implied by the per-stage patch-embedding strides."""
        model = TFCvtModel(config=config )
        # NOTE(review): the training flag was mangled; False matches upstream.
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward pass through the classification head; checks logits shape."""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        """(config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( __a , __a , unittest.TestCase):
    """TF CvT test suite (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): the class-level flags below were all renamed to
    ``UpperCamelCase__`` (later assignments shadow earlier ones), setUp refers
    to ``TFCvtModelTester``/``TFCvtConfigTester`` although the helper classes
    above were renamed, and several locals/arguments are unresolved
    (``UpperCAmelCase``/``inputs_dict``/...). Restore the original identifiers
    before running.
    """

    # all_model_classes / pipeline mapping / five boolean test flags — all
    # mangled to the same name, see NOTE above.
    UpperCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    UpperCamelCase__ = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def UpperCamelCase ( self ):
        """setUp: build the model tester and the config tester."""
        _UpperCAmelCase = TFCvtModelTester(self )
        _UpperCAmelCase = TFCvtConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )

    def UpperCamelCase ( self ):
        """Run the standard battery of config serialization checks."""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions' )
    def UpperCamelCase ( self ):
        """Skipped: model has no attention outputs."""
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def UpperCamelCase ( self ):
        """Skipped: model takes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def UpperCamelCase ( self ):
        """Skipped: no token embedding tables to test."""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    def UpperCamelCase ( self ):
        """tf.data conversion test; GPU-only (grouped-conv backprop limitation)."""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    @slow
    def UpperCamelCase ( self ):
        """keras fit() smoke test; GPU-only for the same reason."""
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
    def UpperCamelCase ( self ):
        """keras fit() under mixed_float16; policy is restored afterwards."""
        _UpperCAmelCase = tf.keras.mixed_precision.Policy('mixed_float16' )
        tf.keras.mixed_precision.set_global_policy(UpperCAmelCase )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32' )

    def UpperCamelCase ( self ):
        """The model's call signature must start with `pixel_values`."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(UpperCAmelCase )
            _UpperCAmelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )

    def UpperCamelCase ( self ):
        """hidden_states output: count equals len(depth), first stage is
        embed_dim[0] x (image_size/4)^2; checked via kwarg and via config."""
        def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
            _UpperCAmelCase = model_class(UpperCAmelCase )
            _UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _UpperCAmelCase = outputs.hidden_states
            _UpperCAmelCase = len(self.model_tester.depth )
            self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

    def UpperCamelCase ( self ):
        """Shape test for the base model."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def UpperCamelCase ( self ):
        """Shape test for the classification head."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )

    @slow
    def UpperCamelCase ( self ):
        """Loading the first pretrained checkpoint returns a model."""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = TFCvtModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Any:
    """Load the COCO sample image used as a fixture by the integration tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
    """Integration test: run the pretrained TF CvT classifier on the COCO
    fixture image and compare the first logits against reference values.

    NOTE(review): ``prepare_img`` and the names used below (``model``,
    ``image_processor``, ``inputs``, ``outputs``) are the original,
    pre-obfuscation identifiers and are currently unresolved — restore
    consistent names before running.
    """

    @cached_property
    def UpperCamelCase ( self ):
        """Image processor matching the first pretrained CvT checkpoint."""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def UpperCamelCase ( self ):
        """Forward pass on the fixture image; checks logits shape (1, 1000)
        and the first three logit values to 1e-4."""
        _UpperCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
        # forward pass
        _UpperCAmelCase = model(**UpperCAmelCase )
        # verify the logits
        _UpperCAmelCase = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )
        _UpperCAmelCase = tf.constant([0.92_85, 0.90_15, -0.31_50] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger (name mangled by obfuscation; conventionally `logger`).
A =logging.get_logger(__name__)
class _a ( __a ):
    """Image processor implementing the standard resize / center-crop /
    rescale / normalize pipeline, plus semantic-segmentation post-processing.

    NOTE(review): the obfuscated original named every parameter of every
    method ``lowercase`` (duplicate-argument SyntaxErrors throughout) and
    every method ``A``; parameter/method names below are reconstructed from
    the preserved right-hand sides (e.g. ``self.resize(...)`` calls in
    ``preprocess``) and the standard transformers image-processor layout.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Default: resize shortest edge to 256, then center-crop to 224x224.
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize so the shortest edge matches ``size['shortest_edge']``,
        preserving aspect ratio."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        # Module-level `resize` from image_transforms, not this method.
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop to ``(size['height'], size['width'])``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Channel-wise normalize: (image - mean) / std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline over one image or a batch; every step can be
        overridden per call, defaulting to the values set at construction."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None ):
        """Turn model logits into per-image segmentation maps, optionally
        resizing each map to the corresponding entry of ``target_sizes``."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                # NOTE(review): align_corners flag was mangled; False matches
                # the upstream processors — confirm.
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 0
|
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase_ ( check_program,timeout,task_id,completion_id ):
    '''Run ``check_program`` in a subprocess with a hard ``timeout`` and report the outcome.

    check_program: source text of the generated-program test harness.
    timeout: seconds allowed before the run is declared "timed out".
    task_id / completion_id: identifiers echoed back in the result dict.
    Returns a dict with keys task_id, passed, result, completion_id.

    NOTE(review): the original declared all four parameters under the single
    mangled name ``snake_case_`` (a SyntaxError); names restored from usage.
    '''
    manager = multiprocessing.Manager()
    # Shared list the worker appends its verdict to.
    result = manager.list()
    # NOTE(review): ``_a`` is a mangled reference to the unsafe-execute worker
    # defined below in this file; restore its real name before running.
    p = multiprocessing.Process(target=_a,args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("""timed out""" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def lowerCAmelCase_ ( check_program,result,timeout ):
    '''Worker: exec ``check_program`` under I/O and time guards.

    Appends exactly one of "passed" / "timed out" / "failed: ..." to the
    shared ``result`` list.

    NOTE(review): the original declared all three parameters under the single
    mangled name ``snake_case_`` (a SyntaxError); names restored from usage.
    ``create_tempdir``/``reliability_guard``/``swallow_io``/``time_limit``/
    ``TimeoutException`` are the intended names of helpers defined elsewhere
    in this file whose definitions were also mangled — confirm before running.
    '''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program,exec_globals )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(f'''failed: {e}''' )
        # Needed for cleaning up: restore the system calls disabled above.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def lowerCAmelCase_ ( snake_case_ ):
    '''Context manager: raise TimeoutException after ``snake_case_`` seconds.

    Uses a real-time interval timer + SIGALRM; the timer is always cleared on
    exit. NOTE(review): ``TimeoutException`` is the intended name of the
    timeout marker class defined below (its name was mangled) — confirm.
    The original's inner handler declared both parameters as ``snake_case_``
    (a SyntaxError) and passed the undefined name ``_a`` to setitimer/signal.
    '''
    def signal_handler(signum,frame ):
        raise TimeoutException("""Timed out!""" )
    signal.setitimer(signal.ITIMER_REAL,snake_case_ )
    signal.signal(signal.SIGALRM,signal_handler )
    try:
        yield
    finally:
        # Disarm the timer so the alarm cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL,0 )
@contextlib.contextmanager
def lowerCAmelCase_ ( ):
    '''Silence the block: stdout and stderr go to a write-only sink and any
    read from stdin raises (via the write-only stream's read methods).'''
    sink = WriteOnlyStringIO()
    with contextlib.ExitStack() as stack:
        stack.enter_context(contextlib.redirect_stdout(sink ) )
        stack.enter_context(contextlib.redirect_stderr(sink ) )
        stack.enter_context(redirect_stdin(sink ) )
        yield
@contextlib.contextmanager
def lowerCAmelCase_ ( ):
    '''Create a temporary directory, chdir into it for the duration, and yield its path.

    NOTE(review): ``chdir`` is the cwd-restoring context manager defined later
    in this file (its definition name was mangled); the original passed the
    undefined name ``_a`` — fixed to the yielded directory path.
    '''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class lowercase ( Exception ):
    '''Raised by the time-limit guard when execution exceeds its budget.

    NOTE(review): the original base ``__a`` is undefined in this module; a
    timeout marker must be raisable/catchable, so the base is ``Exception``.
    (The class name itself was mangled — canonically TimeoutException.)
    '''
    pass
class lowercase ( io.StringIO ):
    '''StringIO that can be written to but raises OSError on any read.

    NOTE(review): the original defined all four methods under the single
    mangled name ``a__`` so only the last (returning False) survived; the
    canonical names read/readline/readlines/readable are restored here.
    (The class name itself was mangled — canonically WriteOnlyStringIO.)
    '''
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        # Declares the stream unreadable to callers that check first.
        return False
class lowercase ( contextlib._RedirectStream ):  # type: ignore
    '''Context manager that redirects ``sys.stdin`` to a given stream.

    ``contextlib._RedirectStream`` reads the attribute name to patch from the
    class attribute ``_stream``; the original mangled it to ``_a``, which left
    stdin untouched — fixed. (Class name mangled — canonically redirect_stdin.)
    '''
    _stream = """stdin"""
@contextlib.contextmanager
def lowerCAmelCase_ ( root ):
    '''Temporarily change the working directory to ``root``; "." is a no-op.

    Always restores the previous working directory on exit. The original's
    parameter was mangled to ``snake_case_`` while the body used ``root``,
    and ``os.chdir`` was called with the undefined name ``_a`` — fixed.
    '''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def lowerCAmelCase_ ( maximum_memory_bytes=None ):
    '''Harden the current process before exec-ing untrusted code.

    Caps the address/data/stack rlimits when ``maximum_memory_bytes`` is given
    (stack limit skipped on macOS) and disables faulthandler. The original's
    parameter was mangled to ``snake_case_`` while the body used
    ``maximum_memory_bytes`` — fixed.

    WARNING / NOTE(review): in the canonical implementation the long block of
    assignments below nulls out destructive builtins and os/shutil/subprocess/
    sys helpers (e.g. ``os.kill = None``, ``builtins.exit = None``); here every
    assignment target was mangled to the throwaway local ``_A``, so those
    protections are currently inert — restore the real attribute targets
    before trusting this as a sandbox.
    '''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS,(maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA,(maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK,(maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    # Inert placeholders (see NOTE above): intended to disable builtins.exit/quit.
    _A = None
    _A = None
    import os
    # Intended: os.environ["OMP_NUM_THREADS"] = "1" plus nulling os.kill, os.system, ...
    _A = """1"""
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
    import shutil
    # Intended: nulling shutil.rmtree, shutil.move, shutil.chown.
    _A = None
    _A = None
    _A = None
    import subprocess
    # Intended: nulling subprocess.Popen.
    _A = None  # type: ignore
    _A = None
    import sys
    # Intended: nulling sys.modules entries for ipdb/joblib/resource/psutil/tkinter.
    _A = None
    _A = None
    _A = None
    _A = None
    _A = None
| 26
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (preds , labels ):
    '''Mean elementwise-equality accuracy between predictions and gold labels.

    preds/labels: equal-shaped numpy arrays (inferred from ``.mean()`` usage).
    The original declared both parameters under the single mangled name
    ``_a`` (a SyntaxError); names restored from the body.
    '''
    return (preds == labels).mean()
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
    '''CLI arguments describing the multiple-choice task and its data.

    Field names are grounded by their uses in main() (data_args.task_name,
    .data_dir, .max_seq_length, .overwrite_cache). The original mangled every
    field name to ``__a`` and used the undefined name ``__a`` as the boolean
    default — the canonical default for overwrite_cache is ``False``.
    '''
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
    '''Entry point: fine-tune and/or evaluate a multiple-choice transformer.

    Parses (ModelArguments, DataTrainingArguments, TrainingArguments), sets up
    logging and seeding, loads config/tokenizer/model and datasets, then runs
    the HF Trainer. Returns the evaluation results dict (empty if no eval).

    NOTE(review): every local assignment target in this function was mangled
    to ``UpperCAmelCase`` and several call arguments to ``_a``; the right-hand
    sides and later uses reveal the intended names (parser, model_args /
    data_args / training_args, processor, label_list, num_labels, config,
    tokenizer, model, train_dataset, eval_dataset, data_collator, trainer,
    results, result, output_eval_file) — restore them before running.
    '''
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing, non-empty output directory when training.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , _a )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor and its label set; unknown tasks fail fast.
    try:
        UpperCAmelCase = processors[data_args.task_name]()
        UpperCAmelCase = processor.get_labels()
        UpperCAmelCase = len(_a )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(_a : EvalPrediction ) -> Dict:
        # Accuracy of the argmax prediction over the choice axis.
        UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}
    # Data collator
    UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(_a , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , _a , _a )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(_a )
    return results
def snake_case_ (_a : Optional[int] ):
    '''Entry point for TPU ``xla_spawn`` launches; the spawn index argument is unused.'''
    # For xla_spawn (TPUs)
    # NOTE(review): ``main`` is the canonical name of the training entry point
    # defined above (mangled in this file to ``snake_case_``) -- confirm.
    main()
if __name__ == "__main__":
    main()
| 34
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """Configuration for XLM-RoBERTa models; defaults reproduce xlm-roberta-base.

    NOTE(review): the original declared every __init__ parameter under the
    single mangled name ``__a`` (a SyntaxError) and assigned each value to a
    throwaway local instead of ``self``; parameter names are restored from the
    assignment right-hand sides. Base restored to the imported PretrainedConfig
    and the mangled class attribute to the canonical ``model_type``.
    """
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __SCREAMING_SNAKE_CASE (OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa.

    NOTE(review): the base class and the property name were mangled (``__a`` /
    ``UpperCamelCase__``); restored to the imported OnnxConfig and its
    ``inputs`` contract, and the mangled local to ``dynamic_axis``.
    """
    @property
    def inputs( self ):
        '''Describe the exporter's input tensors and their dynamic axes.'''
        # Multiple-choice adds a "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 63
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( ProcessorMixin ):
    """Processor bundling a Chinese-CLIP image processor with a BERT tokenizer.

    NOTE(review): attribute, method and parameter identifiers were mangled in
    the original (``__a`` class attrs, methods named ``A``, duplicated
    ``lowercase`` parameters — a SyntaxError); they are restored here to the
    canonical processor contract, and the base to the imported ProcessorMixin.
    """
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ChineseCLIPImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''Accept the deprecated ``feature_extractor`` kwarg as an alias for
        ``image_processor``; both an image processor and a tokenizer are required.'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer encoding (with ``pixel_values`` attached when
        images are also given), or a BatchEncoding of the image features only.
        '''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # BUGFIX: the original computed the pixel values but never attached
            # them to the returned encoding (the assignment target was lost).
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        '''Order-preserving union of tokenizer and image-processor input names.'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
| 34
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class lowerCamelCase ( __a , unittest.TestCase ):
    '''Tokenizer test-suite for BartphoTokenizer.

    NOTE(review): the mixin base was mangled to ``__a`` (canonically
    TokenizerTesterMixin, imported above); the three class attributes all
    collide under the mangled name ``_A`` (canonically tokenizer_class /
    test_rust_tokenizer / test_sentencepiece or similar flags); and several
    in-body references were mangled to ``snake_case`` — in setUp and
    test_full_tokenizer the first BartphoTokenizer argument is presumably the
    module-level sentencepiece sample-vocab path defined above (itself mangled
    to ``__a``) — confirm before running.
    '''
    _A : List[Any] = BartphoTokenizer
    _A : str = False
    _A : str = True
    def lowerCAmelCase_ ( self: Any ) -> Any:
        # setUp: build a tiny monolingual vocab file and a tokenizer saved to tmpdir.
        super().setUp()
        snake_case_ :int = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
        snake_case_ :List[str] = dict(zip(snake_case , range(len(snake_case ) ) ) )
        snake_case_ :str = {"""unk_token""": """<unk>"""}
        snake_case_ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
        with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""" )
        snake_case_ :Tuple = BartphoTokenizer(snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase_ ( self: Tuple , **snake_case: List[str] ) -> Optional[Any]:
        # get_tokenizer: reload the tokenizer saved by setUp, with special tokens applied.
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case )
    def lowerCAmelCase_ ( self: Dict , snake_case: int ) -> List[str]:
        # get_input_output_texts: sample input and its expected (unk-substituted) output.
        snake_case_ :Optional[Any] = """This is a là test"""
        snake_case_ :Tuple = """This is a<unk><unk> test"""
        return input_text, output_text
    def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
        # test_full_tokenizer: tokenization and token->id conversion round-trip.
        snake_case_ :Dict = BartphoTokenizer(snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
        snake_case_ :Optional[int] = """This is a là test"""
        snake_case_ :str = """▁This ▁is ▁a ▁l à ▁t est""".split()
        snake_case_ :List[str] = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )
        snake_case_ :Optional[Any] = tokens + [tokenizer.unk_token]
        snake_case_ :List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
| 66
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A =logging.get_logger(__name__)
A ={
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _a ( PretrainedConfig ):
    """Configuration for Marian MT models; defaults reproduce Helsinki-NLP/opus-mt-en-de.

    NOTE(review): the original declared every __init__ parameter under the
    single mangled name ``lowercase`` (a SyntaxError) and the class attributes
    under ``__a``; names are restored from the body's assignment targets and
    the canonical Marian configuration. Base restored to the imported
    PretrainedConfig.
    """
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=58_101 , decoder_vocab_size=None , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , activation_dropout=0.0 , attention_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58_100 , scale_embedding=False , pad_token_id=58_100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        self.vocab_size = vocab_size
        # Decoder shares the encoder vocab unless told otherwise.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class _a ( __a ):
    """ONNX export configuration for Marian seq2seq models.

    NOTE(review): this class is heavily mangled — every method is named ``A``
    (so later defs shadow earlier ones), several signatures repeat the
    parameter name ``lowercase`` (a SyntaxError), and every local assignment
    target is ``UpperCAmelCase``. The call sites reveal the canonical names:
    inputs, outputs, _generate_dummy_inputs_for_default_and_seq2seq_lm,
    _generate_dummy_inputs_for_causal_lm,
    _generate_dummy_inputs_for_encoder_and_decoder, generate_dummy_inputs,
    _flatten_past_key_values_, atol_for_validation. The base ``__a`` is
    presumably the imported OnnxSeqaSeqConfigWithPast — restore before use.
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def A ( self : int ):
        '''Describe the exporter's input tensors and their dynamic axes per task.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With cached past keys the decoder only feeds one new token.
                UpperCAmelCase = {0: '''batch'''}
                UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def A ( self : Any ):
        '''Describe the exporter's output tensors, adding past-key axes when cached.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super().outputs
        else:
            UpperCAmelCase = super(lowercase , self ).outputs
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build dummy encoder+decoder inputs (and zeroed past_key_values) for export.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        # Generate decoder inputs
        UpperCAmelCase = seq_length if not self.use_past else 1
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        UpperCAmelCase = dict(**lowercase , **lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = decoder_seq_length + 3
            UpperCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            UpperCAmelCase = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 )
            UpperCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase = min(lowercase , lowercase )
            UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers
            UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(lowercase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                    ) )
            # TODO: test this.
            UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(lowercase , lowercase ):
                common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
        return common_inputs
    def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build dummy causal-LM inputs with zeroed past_key_values for export.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            UpperCAmelCase = seqlen + 2
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = common_inputs['''attention_mask'''].dtype
            UpperCAmelCase = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
            UpperCAmelCase = [
                (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
            ]
        return common_inputs
    def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Tokenize a fixed dummy sentence at an effective batch/sequence size.'''
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase )
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) )
        return common_inputs
    def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Dispatch dummy-input generation by task (seq2seq vs causal-lm).'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        else:
            UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        return common_inputs
    def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ):
        '''Flatten past_key_values using the task-appropriate base implementation.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
        else:
            UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_(
                lowercase , lowercase , lowercase , lowercase )
    @property
    def A ( self : Any ):
        '''Absolute tolerance used when validating exported model outputs.'''
        return 1E-4
| 34
| 0
|
def A_ ( s ) -> bool:
    '''Return True when every bracket in ``s`` is properly matched and nested.

    Non-bracket characters are ignored. Fixes the original's undefined ``_a``
    references (``len(_a)`` -> ``len(s)`` / ``len(stack)``) and the incorrect
    ``Optional[int]`` return annotation (the function returns a bool).
    '''
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            # Closing bracket with nothing open, or mismatched pair.
            return False
    # Balanced only if nothing is left open.
    return len(stack ) == 0
def A_ ( ) -> None:
    '''Prompt for a bracket sequence and report whether it is balanced.

    NOTE(review): both this entry point and the balance checker above were
    mangled to the same name ``A_``, and the original body/guard called the
    undefined names ``is_balanced``/``main`` — restore distinct names
    (is_balanced, main) before running.
    '''
    sequence = input("Enter sequence of brackets: " )
    if is_balanced(sequence ):
        print(sequence , "is balanced" )
    else:
        print(sequence , "is not balanced" )
if __name__ == "__main__":
    main()
| 52
|
'''simple docstring'''
import os
def snake_case_ ():
UpperCAmelCase = os.path.join(os.path.dirname(_a ) , '''num.txt''' )
with open(_a ) as file_hand:
return str(sum(int(_a ) for line in file_hand ) )[:1_0]
if __name__ == "__main__":
print(solution())
| 34
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Maps the `mode` constructor argument of BaseTransformer to an AutoModel class.
# NOTE: later code (L10935-equivalent) looks these up as MODEL_MODES, so the
# constants must carry these names rather than the single throwaway name `A`.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """Generic pytorch-lightning wrapper around a Hugging Face config/tokenizer/model triple.

    Fixes over the previous revision: the constructor and several methods used the same
    name for every parameter (a SyntaxError), and every method was named ``A`` so later
    defs shadowed earlier ones while bodies still called ``get_lr_scheduler`` /
    ``total_steps`` / ``get_dataloader`` by their real names.
    """

    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize config, tokenizer and model, honoring overrides from `hparams`."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Forward selected dropout/layerdrop hparams into the model config when set.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Reload `self.model` from a (possibly different) pretrained checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the LR scheduler selected by --lr_scheduler, stepped per batch."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        # Subclasses provide validation_step; test shares its logic.
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        # Cache file name encodes split, bare model name and max sequence length.
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initializes the RAG retriever on the master worker only.

    Renamed from `_a`: `generic_train` below instantiates `InitCallback()`.
    """

    # NOTE(review): hook name presumed from pytorch-lightning's Callback API — confirm
    # against the original project; the previous revision's signature did not compile.
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _a(pl.Callback):
    """Debug callback: print the name of any RAG parameter that received no gradient."""

    def on_after_backward(self, trainer, pl_module):
        # A None grad after backward means the parameter was not part of the loss graph.
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Logs learning rates per batch and dumps validation/test metrics.

    Renamed from `_a`: `generic_train` below instantiates `LoggingCallback()`.
    The previous revision's method signatures reused one parameter name (SyntaxError)
    and all three hooks were named ``A``, so only the last survived.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every param group.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def snake_case_(parser: argparse.ArgumentParser, root_dir: str) -> None:
    """Register generic training CLI arguments on `parser`.

    `root_dir` anchors the default output and data directories. The previous
    revision declared both parameters as `_a` (a SyntaxError) and lost the
    `type=` callables of several arguments.
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def snake_case_(
    model: "BaseTransformer",
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a pl.Trainer around `model` and optionally run training.

    Returns the trainer. Fixes over the previous revision: every parameter was
    named `_a` (a SyntaxError), `extra_callbacks` defaulted to a shared mutable
    list, and the fp16 flag was read from the nonexistent `args.fpaa`.
    """
    pl.seed_everything(args.seed)

    # Avoid the mutable-default pitfall; accept any iterable of callbacks.
    extra_callbacks = list(extra_callbacks) if extra_callbacks else []

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 34
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase(unittest.TestCase):
    """Unit tests for `DisjunctiveConstraint`.

    The previous revision referenced undefined names (`lowerCAmelCase__`,
    `stepped`/`completed`/`reset` were never bound) and named its methods `a_`,
    so unittest never collected them; they are restored as `test_*`.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One sequence being a strict prefix of another is ambiguous and must be rejected.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 69
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
UpperCAmelCase = cst_fwd.get(_a , np.inf )
UpperCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
UpperCAmelCase = new_cost_f
UpperCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ):
UpperCAmelCase = -1
UpperCAmelCase = set()
UpperCAmelCase = set()
UpperCAmelCase = {source: 0}
UpperCAmelCase = {destination: 0}
UpperCAmelCase = {source: None}
UpperCAmelCase = {destination: None}
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = PriorityQueue()
UpperCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
UpperCAmelCase , UpperCAmelCase = queue_forward.get()
visited_forward.add(_a )
UpperCAmelCase , UpperCAmelCase = queue_backward.get()
visited_backward.add(_a )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
UpperCAmelCase = pass_and_relaxation(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
UpperCAmelCase = shortest_distance
return shortest_path_distance
A ={
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
A ={
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34
| 0
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A__ = logging.get_logger(__name__)
class __lowerCAmelCase(LayoutLMvaImageProcessor):
    """Deprecated alias for the image processor, kept for backward compatibility.

    The base class is the `LayoutLMvaImageProcessor` imported above (the previous
    revision inherited from the undefined name `__a`), and the constructor
    signature repeated one name for *args and **kwargs (a SyntaxError).
    """

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning, then behave exactly like the processor.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 82
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig (architecture + label maps) for the given checkpoint name.

    Renamed from `snake_case_`: the conversion entry point below calls it by this
    name. The previous revision used `_a` as the parameter while the body read
    `model_name`, and stringified the parameter instead of the label keys.
    """
    config = ASTConfig()

    # NOTE(review): attribute names below follow the upstream conversion script — confirm.
    if "10-10" in model_name:
        pass  # default patch strides
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}  # fix: cast the JSON keys, not model_name
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def snake_case_(name):
    """Translate one original AST checkpoint parameter name to the HF naming scheme.

    Fix: the previous revision's parameter was `_a` while the body read `name`,
    so every call raised NameError. Replacement order matters (e.g. `attn.proj`
    must be handled before the bare `attn` substring).
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def _rename_ast_key(name):
    """Translate one original checkpoint parameter name to the HF naming scheme.

    Kept private and local so `convert_state_dict` is self-contained; replacement
    order matters (`attn.proj` before the bare `attn`).
    """
    for old, new in (
        ("module.v", "audio_spectrogram_transformer"),
        ("cls_token", "embeddings.cls_token"),
        ("dist_token", "embeddings.distillation_token"),
        ("pos_embed", "embeddings.position_embeddings"),
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("blocks", "encoder.layer"),
        ("attn.proj", "attention.output.dense"),
        ("attn", "attention.self"),
        ("norm1", "layernorm_before"),
        ("norm2", "layernorm_after"),
        ("mlp.fc1", "intermediate.dense"),
        ("mlp.fc2", "output.dense"),
        ("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm"),
        ("module.mlp_head.0", "classifier.layernorm"),
        ("module.mlp_head.1", "classifier.dense"),
    ):
        if old in name:
            name = name.replace(old, new)
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused qkv projections into query/key/value tensors.

    Renamed from `snake_case_`: the conversion entry point below calls it by this
    name. Fixes over the previous revision: duplicate `_a` parameters
    (SyntaxError) and the split q/k/v slices were assigned to a discarded local
    instead of being written back into the state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])  # key looks like module.v.blocks.<n>.attn.qkv.*
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[_rename_ast_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head tensors from `state_dict` in place.

    Renamed from `snake_case_`: the conversion entry point below calls it by this
    name; the previous revision's body also read `state_dict` while the parameter
    was `_a` (NameError).
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)  # default keeps missing keys from raising
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert it, verify logits and save it.

    Renamed from `snake_case_`: the `__main__` block below calls it by this name.
    The previous revision declared all three parameters as `_a` (a SyntaxError)
    and discarded the converted state dict before loading.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__(ConfigTester):
    """Segformer config tester.

    The base class is the `ConfigTester` imported above (the previous revision
    inherited from the undefined name `__a`); the check method is restored to
    the name the ConfigTester framework invokes.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # Segformer configs must expose these architecture attributes.
        for attr in ("hidden_sizes", "num_attention_heads", "num_encoder_blocks"):
            self.parent.assertTrue(hasattr(config, attr))
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , _SCREAMING_SNAKE_CASE=[16, 32, 64, 1_28] , _SCREAMING_SNAKE_CASE=[1, 4, 8, 16] , _SCREAMING_SNAKE_CASE=[1, 2, 4, 8] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : List[str] = image_size
__lowerCAmelCase : Optional[Any] = num_channels
__lowerCAmelCase : Dict = num_encoder_blocks
__lowerCAmelCase : List[Any] = sr_ratios
__lowerCAmelCase : str = depths
__lowerCAmelCase : Any = hidden_sizes
__lowerCAmelCase : List[str] = downsampling_rates
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : str = is_training
__lowerCAmelCase : int = use_labels
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Dict = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : Dict = num_labels
__lowerCAmelCase : Dict = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = SegformerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Dict = SegformerForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Any = SegformerForSemanticSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss , 0.0 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = config_and_inputs
__lowerCAmelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __a , __a , unittest.TestCase):
    """Common-API tests for the Segformer model family.

    NOTE(review): the base classes are obfuscated to ``__a``; upstream these are
    the ModelTesterMixin / PipelineTesterMixin test mixins — confirm against the
    module's imports (not visible in this chunk).  Class attributes and method
    names were all mangled to single identifiers (so only the last of each
    survived); they are restored below — ``all_model_classes`` is directly
    grounded by its uses in this class, the rest follow the upstream test-suite
    conventions and should be confirmed.
    """

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True  # presumably — mangled attribute name, confirm upstream
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        # Tiny-model builder plus the generic SegformerConfig round-trip tester.
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # one attention tensor per transformer layer, summed over all blocks
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # one hidden-state tensor per encoder block
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # NOTE(review): the argument of get_values() was mangled; upstream
            # this skips the bare base-model classes via MODEL_MAPPING — confirm
            # the module-level import.
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        # NOTE(review): the original method name was mangled; the upstream name
        # for this skipped common test is uncertain — confirm before relying on it.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the shared COCO cats test image used by the integration tests.

    Name restored from the call sites (``prepare_img()`` in the slow tests
    below); the mangled original bound the image to a throwaway local and
    returned an undefined name.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class A__ (unittest.TestCase):
    """Slow integration tests against published Segformer checkpoints.

    NOTE(review): the class name was mangled to ``A__`` and now shadows the
    common-test class above — restoring distinct upstream names needs a
    file-wide decision, so the name is kept here and only the broken method
    bodies are repaired.  ``torch_device`` is assumed imported at module top.
    """

    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        # looser tolerance for the cityscapes checkpoint
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        # with an explicit target size the map is resized back to (500, 300)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # without target sizes the map stays at logits resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 86
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ (v, key, l, r):  # noqa: E741
    """Lower-bound binary search: first index in ``v[l+1 .. r]`` with ``v[idx] >= key``.

    ``l`` is an exclusive left sentinel (call with ``l = -1``); ``r`` is an
    inclusive right bound.  The caller must guarantee some element in range
    satisfies ``v[idx] >= key``.  (The mangled original declared four
    parameters all named ``_a`` — a SyntaxError — and assigned both branch
    results to the same throwaway local.)
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def snake_case_ (v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of *v*.

    Patience-sorting style O(n log n): ``tail[k]`` holds the smallest possible
    tail value of an increasing subsequence of length ``k + 1``.
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: best possible tail for length-1 subsequences
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the first tail >= v[i] (lower bound).  The search is
            # inlined because the module-level helper's name is shadowed by
            # this function's own (mangled) name.
            low, high = -1, length - 1
            while high - low > 1:
                mid = (low + high) // 2
                if tail[mid] >= v[i]:
                    high = mid
                else:
                    low = mid
            tail[high] = v[i]
    return length
if __name__ == "__main__":
    # Run the embedded doctests when the module is executed directly.
    import doctest
    doctest.testmod()
| 34
| 0
|
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the ``accelerate env`` argument parser.

    When *subparsers* is given (the main accelerate CLI), an ``env``
    sub-command is attached to it; otherwise a standalone parser is created.
    Name restored from the call site in ``main()``; the mangled original named
    the parameter ``__snake_case`` while the body read ``subparsers``, and left
    ``_a`` placeholders for the default and the sub-command handler.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.')
    if subparsers is not None:
        # dispatch to the env report implemented below in this module
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Print (and return) a diagnostic report of the current accelerate setup.

    ``args.config_file`` optionally points at a non-default accelerate config.
    Name restored from the call site in ``main()``; the mangled original named
    the parameter ``__snake_case`` while the body read ``args``, and assigned
    every value to a throwaway local.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F"{pt_version} ({pt_cuda_available})",
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([F"- {prop}: {val}" for prop, val in info.items()]))
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F"\t{accelerate_config}"
    )
    print(accelerate_config_str)
    info['`Accelerate` configs'] = accelerate_config
    return info
def main():
    """CLI entry point: parse arguments and print the accelerate env report.

    Name restored from the ``raise SystemExit(main())`` guard below; the
    mangled original assigned parser and args to the same throwaway local.
    """
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
    # Exit with main()'s return code when run as a script.
    raise SystemExit(main())
| 5
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regex-style full match of *input_string* against *pattern*.

    Supports ``.`` (any single character) and ``*`` (zero or more of the
    preceding element).  Bottom-up DP where ``dp[i][j]`` is 1 iff the first
    ``i`` characters of the string match the first ``j`` characters of the
    pattern.  Name restored from the ``__main__`` call site; the mangled
    original declared both parameters as ``_a`` (a SyntaxError) and lost every
    local binding.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
    # using function to check whether given string matches the given pattern
    # (the mangled original assigned both sample values to the same name ``A``,
    # leaving input_string/pattern undefined in the f-strings below)
    if match_pattern(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-module import table: submodule name -> public symbols it exports.
# (The mangled original assigned each piece to a throwaway ``A`` instead of
# building the ``_import_structure`` dict that _LazyModule below consumes.)
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy below
    # defers them until first attribute access.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy frameworks are only
    # imported when one of the symbols above is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework used for `return_tensors=FRAMEWORK` in the batch tests.
# (The mangled original assigned the value to ``A``, leaving FRAMEWORK —
# which the test class reads — undefined.)
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class _a ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test-suite for PerceiverTokenizer (byte-level, no vocab file).

    NOTE(review): method and attribute names were mangled (everything became
    ``A`` / ``__a``, so only the last definition of each survived).
    ``tokenizer_class`` / ``test_rust_tokenizer`` / ``perceiver_tokenizer``
    are restored from in-class uses; the mixin base comes from the module's
    ``TokenizerTesterMixin`` import; ``test_*`` names follow the upstream
    transformers test file and should be confirmed.
    """

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Save a freshly built tokenizer so get_tokenizer() can reload it.
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """Build a (text, ids) pair of "clean" tokens that round-trip exactly."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        # keep only plain-ASCII-letter tokens that encode back to themselves
        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')

    def test_prepare_batch(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_save_and_load_tokenizer(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # safety check that the upcoming model_max_length override is visible
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )

    def test_decode_invalid_byte_id(self):
        # byte id 178 alone is not valid UTF-8 and decodes to the replacement char
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    # NOTE(review): the four overrides below were all mangled to ``def A``;
    # the upstream names restored here should be confirmed.
    def test_pretrained_model_lists(self):
        # tokenizer can be instantiated without any pretrained files
        pass

    def test_get_vocab(self):
        # tokenizer does not have a vocabulary
        pass

    def test_pretokenized_inputs(self):
        # inputs cannot be pretokenized for a byte-level tokenizer
        pass

    def test_conversion_reversible(self):
        # no vocabulary to round-trip through
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 34
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( PipelineTesterMixin , unittest.TestCase):
    """Fast tests for the Kandinsky 2.2 image-to-image pipeline.

    NOTE(review): every class attribute was mangled to ``UpperCamelCase__`` (so
    only the last survived); names below are restored from the diffusers
    PipelineTesterMixin contract — confirm against upstream.  The mixin base is
    grounded by this module's ``PipelineTesterMixin`` import.
    """

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return 100
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCAmelCase = UNetaDConditionModel(**UpperCAmelCase )
return model
@property
def UpperCamelCase ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.dummy_unet
_UpperCAmelCase = self.dummy_movq
_UpperCAmelCase = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_UpperCAmelCase = DDIMScheduler(**UpperCAmelCase )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase )
# create init_image
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('RGB' ).resize((256, 256) )
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**UpperCAmelCase )
_UpperCAmelCase = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(UpperCAmelCase ) )
_UpperCAmelCase = output.images
_UpperCAmelCase = pipe(
**self.get_dummy_inputs(UpperCAmelCase ) , return_dict=UpperCAmelCase , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
    """Slow GPU integration test for the full Kandinsky 2.2 img2img stack.

    NOTE(review): both methods shared one name in the original (so
    `tearDown` was shadowed), `torch.floataa` does not exist, and the
    device argument was the undefined `UpperCAmelCase`; the names below
    are reconstructed — confirm against the upstream diffusers test.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 39
|
'''simple docstring'''
import os
from distutils.util import strtobool
def snake_case_ (env_keys, default):
    """Return the first non-negative integer found among the given
    environment variables, or ``default`` if none is set.

    Args:
        env_keys: iterable of environment-variable names, probed in order.
        default: value returned when no listed variable holds an int >= 0.

    NOTE(review): the original declared two parameters both named ``_a``
    (a SyntaxError) while the body read ``env_keys``/``default``.
    """
    for e in env_keys:
        # Missing variables fall back to -1 so they are skipped below.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def snake_case_ (key, default=False):
    """Return True when the environment variable ``key`` holds a truthy
    flag ("1", "true", "yes", ...), else False.

    ``default`` is stringified so ``strtobool`` can parse it when the
    variable is unset.

    NOTE(review): the original declared two parameters both named ``_a``
    (a SyntaxError) and never used the value it read from the environment.
    """
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def snake_case_ (_a : str , _a : Optional[Any]="no" ):
UpperCAmelCase = os.environ.get(_a , str(_a ) )
return value
| 34
| 0
|
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
_A : str = 10
_A : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
_A : Union[str, Any] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(_a ) ),
},features=_a,)
return dataset
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_a )
return filename
# FILE_CONTENT + files
# NOTE(review): this constant was bound to `_snake_case` while every fixture
# below references it as `FILE_CONTENT` (a NameError); renamed to match.
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : int = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
_A : Dict = FILE_CONTENT
with open(_a,"""w""" ) as f:
f.write(_a )
return filename
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
import bza
_A : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
_A : Tuple = bytes(_a,"""utf-8""" )
with bza.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
import gzip
_A : Any = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
_A : List[str] = bytes(_a,"""utf-8""" )
with gzip.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_A : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
_A : List[Any] = bytes(_a,"""utf-8""" )
with lza.frame.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_A : int = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_a,"""w""" ) as archive:
archive.write(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import tarfile
_A : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_a,"""w""" ) as f:
f.add(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
import lzma
_A : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
_A : Dict = bytes(_a,"""utf-8""" )
with lzma.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import zipfile
_A : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
_A : List[str] = bytes(_a,"""utf-8""" )
with zstd.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
_A : Optional[int] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_a,"""w""" ) as f:
f.write(_a )
return filename
# Shared row data used by the CSV/JSON/parquet/SQL fixtures below.
# NOTE(review): all five constants were bound to the same name `_snake_case`;
# the names here are reconstructed from the later references
# (DATA, DATA_312, DATA_STR, DATA_DICT_OF_LISTS). DATA2 is unreferenced in
# this file, so its name is a best guess.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = datasets.Dataset.from_dict(_a )
_A : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(_a ) ) as con:
_A : str = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""",tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_a,"""w""",newline="""""" ) as f:
_A : List[str] = csv.DictWriter(_a,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_a,"""w""",newline="""""" ) as f:
_A : int = csv.DictWriter(_a,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import bza
_A : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_a,"""rb""" ) as f:
_A : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_a,"""wb""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(_a ) )
f.write(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(csv_path.replace(""".csv""",""".CSV""" ) ) )
f.write(_a,arcname=os.path.basename(csva_path.replace(""".csv""",""".CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
_A : List[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_a,"""wb""" ) as f:
_A : int = pq.ParquetWriter(_a,schema=_a )
_A : Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_a ) )] for k in DATA[0]},schema=_a )
writer.write_table(_a )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_A : List[str] = {"""data""": DATA}
with open(_a,"""w""" ) as f:
json.dump(_a,_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_A : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(_a,"""w""" ) as f:
json.dump(_a,_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(_a,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(_a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(_a,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(_a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(_a,"""w""" ) as f:
for item in DATA_312:
f.write(json.dumps(_a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(_a,"""w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(_a ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import gzip
_A : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(_a,"""rb""" ) as orig_file:
with gzip.open(_a,"""wb""" ) as zipped_file:
zipped_file.writelines(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
import gzip
_A : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(_a,"""rb""" ) as orig_file:
with gzip.open(_a,"""wb""" ) as zipped_file:
zipped_file.writelines(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(_a ) )
f.write(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.join("""nested""",os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_a,"""w""" ) as f:
f.add(_a,arcname=os.path.basename(_a ) )
f.add(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_a,"""w""" ) as f:
f.add(_a,arcname=os.path.join("""nested""",os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = ["""0""", """1""", """2""", """3"""]
_A : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(_a,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : int = ["""0""", """1""", """2""", """3"""]
_A : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(_a,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = ["""0""", """1""", """2""", """3"""]
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(_a,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(_a ) )
f.write(_a,arcname=os.path.basename(_a ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
f.write(_a,arcname=os.path.join("""main_dir""",os.path.basename(_a ) ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename("""unsupported.ext""" ) )
f.write(_a,arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
_A : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(_a,"""w""",encoding="""utf-8""" ) as f:
f.write(_a )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return os.path.join("""tests""","""features""","""data""","""test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( ):
return os.path.join("""tests""","""features""","""data""","""test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(_a,"""w""" ) as f:
f.write(_a,arcname=os.path.basename(_a ) )
f.write(_a,arcname=os.path.basename(_a ).replace(""".jpg""","""2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""","""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""","""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""","""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""","""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""","""w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 26
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A =logging.getLogger(__name__)
A ='Hello world! cécé herlolip'
A =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def snake_case_(bertabs_checkpoint_path, pytorch_dump_folder_path):
    """Convert a BertAbs checkpoint from the authors' implementation into a
    `BertAbsSummarizer` state dict, verifying both models produce identical
    outputs before saving.

    Args:
        bertabs_checkpoint_path: path to the authors' `torch.save`d model.
        pytorch_dump_folder_path: kept for the CLI contract; the state dict
            is written to the hard-coded path at the bottom.

    NOTE(review): the original declared two parameters both named ``_a``
    (a SyntaxError) and collapsed every local into one name; locals and the
    BertAbsConfig flag values below are reconstructed — confirm upstream.
    """
    config = BertAbsConfig(
        temp_dir='.',
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder='bert',
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # map_location lambda keeps tensors on CPU regardless of where they were saved.
    checkpoints = torch.load(bertabs_checkpoint_path, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device('cpu'), config)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device('cpu'))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('convert the model')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('Make sure that the models\' outputs are identical')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('This is sample éàalj\'-.')
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode('This is sample 3 éàalj\'-.')
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('Maximum absolute difference beween weights: {:.2f}'.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('Maximum absolute difference beween weights: {:.2f}'.format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1E-3)
    if are_identical:
        logging.info('all weights are equal up to 1e-3')
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.')
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('saving the model\'s state dictionary')
    torch.save(
        new_model.state_dict(), './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin')
if __name__ == "__main__":
A =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
A =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 34
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """Configuration for Transformer-XL (`transfo-xl`) models.

    Stores the hyper-parameters needed to build a Transformer-XL model and
    inherits serialization behaviour from `PretrainedConfig`.

    NOTE(review): the original bound every class attribute to `__a`, gave
    every `__init__` parameter the name `__a` (a SyntaxError), and the
    setter decorator referenced a nonexistent `max_position_embeddings`
    property; identifiers below are reconstructed from the body and values
    — confirm against upstream `TransfoXLConfig`.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    # Canonical-name -> attribute mapping consumed by PretrainedConfig.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the head cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed sequence-length limit.
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
| 63
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject ):
    """Import-safe placeholder requiring the `flax` and `transformers`
    backends; any use raises via `requires_backends`.

    NOTE(review): the metaclass was the undefined `__a`, the backend list
    was bound to `__a` (DummyObject reads `_backends`), and each method
    declared `*args` and `**kwargs` under one name (a SyntaxError). The
    four sibling classes in this file all share the name `_a`, so later
    definitions shadow earlier ones — original names lost.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=DummyObject ):
    """Import-safe placeholder requiring the `flax` and `transformers`
    backends; any use raises via `requires_backends`.

    NOTE(review): metaclass/backend-list/parameter names reconstructed —
    see the sibling class above for details; all four dummies share the
    name `_a`, so later definitions shadow earlier ones.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=DummyObject ):
    """Import-safe placeholder requiring the `flax` and `transformers`
    backends; any use raises via `requires_backends`.

    NOTE(review): metaclass/backend-list/parameter names reconstructed —
    the original `metaclass=__a` and duplicate `*lowercase`/`**lowercase`
    parameters could not have imported; class names in this file collide.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a ( metaclass=DummyObject ):
    """Import-safe placeholder requiring the `flax` and `transformers`
    backends; any use raises via `requires_backends`.

    NOTE(review): metaclass/backend-list/parameter names reconstructed —
    the original `metaclass=__a` and duplicate parameter names could not
    have imported; class names in this file collide.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 34
| 0
|
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # Versatile Diffusion needs both `torch` and `transformers` >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: expose dummy objects that raise a clear error on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies available: expose the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 66
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
# Map EfficientNet variant key -> original Keras application class.
# Fix: the assignment target was mangled to `A` while the conversion function
# below reads `model_classes[...]`, and every value was mangled to the single
# (nonexistent) name `EfficientNetBa`; restored from the tf.keras
# `applications.efficientnet` API (EfficientNetB0..B7).
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
# Per-variant EfficientNet hyper-parameters (classifier hidden dim, compound
# scaling coefficients, input resolution, dropout, and which depthwise blocks
# need explicit padding).
# Fix: the assignment target was mangled to `A` while the functions below read
# `CONFIG_MAP[...]`.
CONFIG_MAP = {
    'b0': {
        'hidden_dim': 1280,
        'width_coef': 1.0,
        'depth_coef': 1.0,
        'image_size': 224,
        'dropout_rate': 0.2,
        'dw_padding': [],
    },
    'b1': {
        'hidden_dim': 1280,
        'width_coef': 1.0,
        'depth_coef': 1.1,
        'image_size': 240,
        'dropout_rate': 0.2,
        'dw_padding': [16],
    },
    'b2': {
        'hidden_dim': 1408,
        'width_coef': 1.1,
        'depth_coef': 1.2,
        'image_size': 260,
        'dropout_rate': 0.3,
        'dw_padding': [5, 8, 16],
    },
    'b3': {
        'hidden_dim': 1536,
        'width_coef': 1.2,
        'depth_coef': 1.4,
        'image_size': 300,
        'dropout_rate': 0.3,
        'dw_padding': [5, 18],
    },
    'b4': {
        'hidden_dim': 1792,
        'width_coef': 1.4,
        'depth_coef': 1.8,
        'image_size': 380,
        'dropout_rate': 0.4,
        'dw_padding': [6],
    },
    'b5': {
        'hidden_dim': 2048,
        'width_coef': 1.6,
        'depth_coef': 2.2,
        'image_size': 456,
        'dropout_rate': 0.4,
        'dw_padding': [13, 27],
    },
    'b6': {
        'hidden_dim': 2304,
        'width_coef': 1.8,
        'depth_coef': 2.6,
        'image_size': 528,
        'dropout_rate': 0.5,
        'dw_padding': [31],
    },
    'b7': {
        'hidden_dim': 2560,
        'width_coef': 2.0,
        'depth_coef': 3.1,
        'image_size': 600,
        'dropout_rate': 0.5,
        'dw_padding': [18],
    },
}
def snake_case_ (model_name : str ):
    """Return an `EfficientNetConfig` populated for variant `model_name` (e.g. "b0").

    Per-variant hyper-parameters come from the module-level `CONFIG_MAP`; the
    ImageNet-1k id->label mapping is downloaded from the `huggingface/label-files`
    dataset repo on the Hub (network access required).
    """
    # Fixes: locals were collapsed to one name (`return config` referenced an
    # undefined name), the dict comprehension keyed on the function argument
    # instead of `k`, and the label file handle was never closed.
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1_0_0_0
    with open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}
    # NOTE(review): attribute names restored from the upstream conversion
    # script (`id2label` / `label2id`) — confirm against EfficientNetConfig.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def snake_case_ ():
    """Fetch the standard COCO validation image used as a conversion sanity
    check and return it as a PIL image (network access required)."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # Fixes: `stream=_a` and `return im` referenced undefined names; `stream=True`
    # lets PIL read directly from the response's raw file object.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def snake_case_ (model_name : str ):
    """Build the `EfficientNetImageProcessor` matching the given variant's
    native input resolution."""
    # Fixes: locals were collapsed (`return preprocessor` referenced an
    # undefined name) and `do_center_crop=_a` was undefined.
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] ,
        # NOTE(review): flag value lost in the rename; upstream converter
        # disables center-cropping here — confirm.
        do_center_crop=False ,
    )
    return preprocessor
def snake_case_ (original_param_names ):
    """Build a mapping from TF EfficientNet variable names to HF parameter names.

    Parameters
    ----------
    original_param_names : list[str]
        Variable names of the original Keras model (e.g. "block1a_expand_conv/kernel:0").

    Returns
    -------
    dict[str, str]
        TF name -> HF `state_dict` key. Backbone keys are prefixed with
        "efficientnet."; the classifier head maps to "classifier.weight"/"bias".
    """
    # Fixes: every local was collapsed to one name, so `block_name_mapping` and
    # the classifier keys were lost; restored from the reads in the body below.
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    # e.g. {"1a": "0", "2a": "1", ...} — HF blocks are indexed flat.
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}

    rename_keys = []
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )

    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    # NOTE(review): the classifier-head source keys were lost in the rename;
    # restored from the upstream conversion script ("predictions/...") — confirm.
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def snake_case_ (hf_params , tf_params , key_mapping ):
    """Copy TF weights into the HF `state_dict` in place.

    Parameters
    ----------
    hf_params : dict[str, torch.Tensor]
        HF model `state_dict` (mutated in place via `copy_`).
    tf_params : dict[str, numpy.ndarray]
        TF variable name -> weight array.
    key_mapping : dict[str, str]
        TF name -> HF key (see the rename-keys builder above).
    """
    # Fix: the original declared three parameters all named `_a` (a
    # duplicate-argument SyntaxError); names restored from the call site
    # `replace_params(hf_params, tf_params, key_mapping)` below.
    for key, value in tf_params.items():
        # Batch-norm "normalization" stats are handled by the mapping's bn keys.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # Conv kernels: TF HWIO -> torch OIHW.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            # Depthwise kernels use a different layout in TF.
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense kernels: transpose (in, out) -> (out, in).
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def snake_case_ (model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """Convert a TF-Keras EfficientNet checkpoint to a HF model, verify the two
    models agree on a sample image, and optionally save / push the result.

    Parameters restored from their reads in the body (`model_classes[model_name]`,
    `if save_model:`, `if push_to_hub:`); the original declared four parameters
    all named `_a` — a duplicate-argument SyntaxError — and collapsed every local.
    """
    # Instantiate the original Keras model with pretrained ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True ,
        weights='imagenet' ,
        input_tensor=None ,
        input_shape=None ,
        pooling=None ,
        classes=1_0_0_0 ,
        classifier_activation='softmax' ,
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )

    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    # NOTE(review): the `False` assignment target was lost in the rename; the
    # upstream script freezes the Keras model here — confirm.
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub..." )
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name )
        hf_model.push_to_hub(repo_name )
if __name__ == "__main__":
    # Fixes: the parser/args assignment targets were mangled to `A` while the
    # uses read `parser`/`args`, and `convert_efficientnet_checkpoint` is not
    # defined anywhere in this module (the conversion entry point is bound to
    # the name `snake_case_` — last definition above — so call that).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')

    args = parser.parse_args()
    snake_case_(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class A__ ( __a , __a ):
    """ConvNeXt-V2 model configuration.

    Fixes: the `__init__` parameter list was collapsed to duplicated `A_` names
    (a duplicate-argument SyntaxError) and every `self.<attr>` target was
    mangled to `UpperCamelCase`; names restored from the right-hand sides and
    from the `self.depths` / `self.stage_names` reads below.
    """

    _UpperCAmelCase :Union[str, Any] = """convnextv2"""

    def __init__(
        self ,
        num_channels=3 ,           # input image channels
        patch_size=4 ,             # patchify-stem kernel/stride
        num_stages=4 ,
        hidden_sizes=None ,        # per-stage widths; defaults applied below
        depths=None ,              # per-stage block counts; defaults applied below
        hidden_act="gelu" ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        drop_path_rate=0.0 ,
        image_size=224 ,
        out_features=None ,        # backbone outputs requested by name
        out_indices=None ,         # backbone outputs requested by index
        **kwargs ,
    ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        # Validate/align the requested backbone output features and indices.
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 52
|
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Fixes: assignment targets were mangled to `A` while the uses kept the
    # original names (`url`, `soup`, `image_url`, `image_data`, `file_name`);
    # names restored from those uses.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # NOTE(review): ':' in the timestamp is not a valid filename character on
    # Windows — confirm the target platforms before relying on this name.
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
| 34
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures""")
_UpperCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_UpperCAmelCase = get_tests_dir("""fixtures/dummy-config.json""")
class a ( unittest.TestCase ):
    """Tests for `AutoFeatureExtractor`: Hub/local resolution, error messages,
    `trust_remote_code` behaviour, and custom-class registration.

    NOTE(review): this block looks damaged by an automated rename — the bare
    name `lowerCAmelCase` is passed where the upstream tests pass concrete
    paths/classes (e.g. the fixture constants defined at module level), every
    result is bound to the single name `SCREAMING_SNAKE_CASE_`, and all test
    methods share the name `lowerCamelCase__`, so only the last definition
    survives at class-creation time. Reconcile with the upstream
    `test_feature_extraction_auto.py` before trusting individual assertions.
    """

    def lowerCamelCase__ ( self : Any ) -> str:
        '''Reset a scratch counter (its value is not read by later assertions).'''
        SCREAMING_SNAKE_CASE_: Any =0

    def lowerCamelCase__ ( self : Dict ) -> List[str]:
        '''Load a feature extractor from a Hub model id and check its type.'''
        SCREAMING_SNAKE_CASE_: str =AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

    def lowerCamelCase__ ( self : int ) -> List[Any]:
        '''Load a feature extractor from a local path (see class NOTE).'''
        SCREAMING_SNAKE_CASE_: Tuple =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

    def lowerCamelCase__ ( self : Any ) -> Tuple:
        '''Load from a dir holding only a model config; verify no private keys leak into the saved JSON.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE_: str =WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            SCREAMING_SNAKE_CASE_: Optional[int] =AutoFeatureExtractor.from_pretrained(lowerCAmelCase ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            SCREAMING_SNAKE_CASE_: Any =WavaVecaFeatureExtractor(**lowerCAmelCase )
            # save in new folder
            model_config.save_pretrained(lowerCAmelCase )
            config.save_pretrained(lowerCAmelCase )
            SCREAMING_SNAKE_CASE_: int =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
            # make sure private variable is not incorrectly saved
            SCREAMING_SNAKE_CASE_: Dict =json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

    def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
        '''Load from a standalone feature-extractor config file (see class NOTE).'''
        SCREAMING_SNAKE_CASE_: Dict =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
        self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )

    def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
        '''A non-existent model id must raise with a clear message.'''
        with self.assertRaisesRegex(
            lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
            SCREAMING_SNAKE_CASE_: Optional[int] =AutoFeatureExtractor.from_pretrained("""bert-base""" )

    def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
        '''An invalid git revision must raise with a clear message.'''
        with self.assertRaisesRegex(
            lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            SCREAMING_SNAKE_CASE_: int =AutoFeatureExtractor.from_pretrained(lowerCAmelCase , revision="""aaaaaa""" )

    def lowerCamelCase__ ( self : Tuple ) -> Dict:
        '''A repo without preprocessor_config.json must raise with a clear message.'''
        with self.assertRaisesRegex(
            lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            SCREAMING_SNAKE_CASE_: List[str] =AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
        '''Remote code needs trust_remote_code=True; the loaded extractor must round-trip through save/load.'''
        with self.assertRaises(lowerCAmelCase ):
            SCREAMING_SNAKE_CASE_: Optional[int] =AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCAmelCase ):
            SCREAMING_SNAKE_CASE_: Any =AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: List[str] =AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCAmelCase )
            SCREAMING_SNAKE_CASE_: Dict =AutoFeatureExtractor.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

    def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
        '''Register a custom config/extractor pair and load it through the auto-API; always unregister.'''
        try:
            AutoConfig.register("""custom""" , lowerCAmelCase )
            AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCAmelCase ):
                AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            SCREAMING_SNAKE_CASE_: int =CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCAmelCase )
                SCREAMING_SNAKE_CASE_: Optional[Any] =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
                self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def lowerCamelCase__ ( self : str ) -> Optional[Any]:
        '''When both a local and a remote implementation exist, trust_remote_code decides which wins.'''
        class a ( __a ):
            # Sentinel attribute used to tell the locally-registered extractor apart.
            UpperCamelCase : Optional[Any] = True
        try:
            AutoConfig.register("""custom""" , lowerCAmelCase )
            AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
            # If remote code is not set, the default is to use local
            SCREAMING_SNAKE_CASE_: List[Any] =AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            SCREAMING_SNAKE_CASE_: Dict =AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            SCREAMING_SNAKE_CASE_: Any =AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCAmelCase , """is_local""" ) )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 173
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _a ( __a ):
    """Audio diffusion pipeline: denoises mel-spectrogram images with a UNet
    (optionally in a VQ-VAE latent space) and converts them to audio via `Mel`.

    NOTE(review): this block looks damaged by an automated rename — every local
    is bound to `UpperCAmelCase`, several methods declare multiple parameters
    under the single name `lowercase` (a duplicate-argument SyntaxError), and
    names such as `mask`, `images`, `sample`, `self.get_default_steps` are read
    without a visible definition. Reconcile with the upstream
    `AudioDiffusionPipeline` before relying on the control flow documented here.
    """

    # Optional sub-modules (may be None at registration time).
    __a : str = ["""vqvae"""]

    def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ):
        '''Register the UNet, scheduler, Mel converter and optional VQ-VAE sub-modules.'''
        super().__init__()
        self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase )

    def A ( self : Optional[Any] ):
        '''Default inference step count: 50 for DDIM-like schedulers, 1000 otherwise.'''
        return 50 if isinstance(self.scheduler , lowercase ) else 1_000

    @torch.no_grad()
    def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ):
        '''Run the diffusion loop and return generated images and audio (or a raw tuple when return_dict is falsy).'''
        UpperCAmelCase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(lowercase )
        UpperCAmelCase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            # Sample the initial noise in the UNet's native resolution.
            UpperCAmelCase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=lowercase , device=self.device , )
        UpperCAmelCase = noise
        UpperCAmelCase = None
        if audio_file is not None or raw_audio is not None:
            # Condition on an input audio slice rendered as a mel image.
            self.mel.load_audio(lowercase , lowercase )
            UpperCAmelCase = self.mel.audio_slice_to_image(lowercase )
            UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            # Rescale uint8 pixels into [-1, 1].
            UpperCAmelCase = (input_image / 255) * 2 - 1
            UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                # Move the conditioning image into the VQ-VAE latent space.
                UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample(
                    generator=lowercase )[0]
                UpperCAmelCase = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] )
            # Convert the mask start/end seconds into mel-image pixel columns.
            UpperCAmelCase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            UpperCAmelCase = int(mask_start_secs * pixels_per_second )
            UpperCAmelCase = int(mask_end_secs * pixels_per_second )
            UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , lowercase ):
                UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample''']
            else:
                UpperCAmelCase = self.unet(lowercase , lowercase )['''sample''']
            if isinstance(self.scheduler , lowercase ):
                UpperCAmelCase = self.scheduler.step(
                    model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample''']
            else:
                UpperCAmelCase = self.scheduler.step(
                    model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample''']
            if mask is not None:
                # Re-impose the known (masked) columns of the conditioning image.
                if mask_start > 0:
                    UpperCAmelCase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    UpperCAmelCase = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images
            UpperCAmelCase = self.vqvae.decode(lowercase )['''sample''']
        # Map [-1, 1] model output back to uint8 grayscale images.
        UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        UpperCAmelCase = (images * 255).round().astype('''uint8''' )
        UpperCAmelCase = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) )

    @torch.no_grad()
    def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ):
        '''Reverse-step mel images back toward noise (upstream name presumably `encode` — confirm); DDIM-style schedulers only.'''
        assert isinstance(self.scheduler , lowercase )
        self.scheduler.set_timesteps(lowercase )
        UpperCAmelCase = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        # Rescale uint8 pixels into [-1, 1].
        UpperCAmelCase = (sample / 255) * 2 - 1
        UpperCAmelCase = torch.Tensor(lowercase ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            UpperCAmelCase = self.scheduler.alphas_cumprod[t]
            UpperCAmelCase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            UpperCAmelCase = 1 - alpha_prod_t
            UpperCAmelCase = self.unet(lowercase , lowercase )['''sample''']
            # Inverted DDIM update.
            UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ):
        '''Spherical linear interpolation between two flattened tensors (upstream name presumably `slerp` — confirm).'''
        UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) )
        return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
| 34
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class UpperCamelCase ( __a ):
    """Configuration class for the EnCodec neural audio codec.

    Fixes: the `__init__` parameter list was collapsed to duplicated
    `lowerCAmelCase__` names (a duplicate-argument SyntaxError), every
    `self.<attr>` target was mangled to `snake_case_`, and `hop_length` was
    read without being bound; names restored from the attribute reads in the
    properties below.

    NOTE(review): the four derived properties are all named `a_` (automated
    rename), so only the last binding survives on the class, and the bandwidth
    property reads `self.frame_rate`, which therefore does not resolve to the
    intended hop-based property — reconcile with the upstream `EncodecConfig`
    (chunk_length / chunk_stride / frame_rate / num_quantizers).
    """

    SCREAMING_SNAKE_CASE_ = """encodec"""

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # mutable default kept for upstream signature compat
        sampling_rate=2_4000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],  # mutable default kept for upstream signature compat
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ) -> List[Any]:
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook dim falls back to the encoder hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}')
        super().__init__(**kwargs)

    @property
    def a_ ( self) -> Optional[Any]:
        '''Chunk length in samples, or None when `chunk_length_s` is unset.'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def a_ ( self) -> Optional[int]:
        '''Chunk stride in samples derived from `overlap`, or None when unset.'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def a_ ( self) -> int:
        '''Frames per second: sampling rate divided by the total upsampling factor.'''
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def a_ ( self) -> int:
        '''Number of quantizers needed to reach the largest target bandwidth.'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 69
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A =logging.get_logger(__name__)
A =TypeVar('DatasetType', Dataset, IterableDataset)
def snake_case_ (datasets , probabilities=None , seed=None , info=None , split=None , stopping_strategy="first_exhausted" , ):
    """Interleave several `Dataset` or `IterableDataset` objects into one.

    Parameters (names restored — the original declared six parameters all named
    `_a`, a duplicate-argument SyntaxError, and never bound `dataset_type` /
    `other_type` which the body reads):
      datasets: list of map-style OR iterable datasets (not mixed).
      probabilities: optional sampling probabilities, one per dataset.
      seed: optional RNG seed for the sampling.
      info / split: optional metadata for the resulting dataset.
      stopping_strategy: "first_exhausted" or "all_exhausted".

    Raises ValueError on an empty list, mixed/unsupported element types, or an
    unknown stopping strategy.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            # The first element fixes which flavour is expected for the rest.
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def snake_case_ (dsets , info=None , split=None , axis=0 , ):
    """Concatenate several `Dataset` or `IterableDataset` objects into one.

    Parameters (names restored — the original declared four parameters all
    named `_a`, a duplicate-argument SyntaxError, and never bound
    `dataset_type` / `other_type` which the body reads):
      dsets: list of map-style OR iterable datasets (not mixed).
      info / split: optional metadata for the resulting dataset.
      axis: 0 to concatenate rows, 1 to concatenate columns.

    Raises ValueError on an empty list or mixed/unsupported element types.
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            # The first element fixes which flavour is expected for the rest.
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
| 34
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ = logging.getLogger(__name__)
def _UpperCAmelCase ( preds , labels ):
    """Simple accuracy: fraction of positions where `preds` equals `labels`.

    Both arguments must support elementwise `==` and `.mean()` (e.g. numpy
    arrays of the same shape); returns a float in [0, 1].

    NOTE(review): the original declared both parameters as `snake_case`
    (a SyntaxError) while the body read `preds`/`labels`; restored here.
    """
    return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    NOTE(review): the original bound every field to one duplicated,
    un-annotated name, so `@dataclass` registered no fields and the `__a`
    defaults were undefined. Field names restored from how `main()` reads
    them (`model_name_or_path`, `config_name`, `tokenizer_name`, `cache_dir`).
    """

    # Required: model checkpoint or hub identifier.
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __lowerCAmelCase :
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): the original bound every field to one duplicated,
    un-annotated name, so `@dataclass` registered no fields. Field names
    restored from how `main()` reads them (`task_name`, `data_dir`,
    `max_seq_length`, `overwrite_cache`).
    """

    # `processors` comes from utils_multiple_choice at module level.
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _UpperCAmelCase ( ):
    """Fine-tune a multiple-choice model: parse CLI args, build the model and
    tokenizer, optionally train, optionally evaluate, and return the eval
    metrics dict.

    NOTE(review): this function is mechanically renamed. Several statements
    bind one or more values to the same throwaway name (`_lowerCAmelCase`)
    and later code reads the originally intended names (`parser`,
    `model_args`, `data_args`, `training_args`, `processor`, `result`,
    `results`, ...), plus undefined `_a` placeholders. As written it cannot
    run; comments below record the evident intent — confirm against the
    upstream transformers example before relying on them.
    """
    # Intended: parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    # Intended: model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output dir unless --overwrite_output_dir.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , _a )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor; an unknown task name surfaces as ValueError.
    try:
        _lowerCAmelCase = processors[data_args.task_name]()
        _lowerCAmelCase = processor.get_labels()
        _lowerCAmelCase = len(_a )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _lowerCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    _lowerCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    _lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    _lowerCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    _lowerCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(snake_case ) -> Dict:
        # Intended: argmax over the choice dimension, then simple accuracy.
        _lowerCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}
    # Data collator
    _lowerCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    _lowerCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    _lowerCAmelCase = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        _lowerCAmelCase = trainer.evaluate()
        _lowerCAmelCase = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(_a , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , _a , _a )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(_a )
    return results
def _UpperCAmelCase ( snake_case ):
    """Multiprocessing entry point (presumably the `_mp_fn(index)` used by
    xla_spawn — TODO confirm); the index argument is ignored.

    NOTE(review): `main` below is undefined in this renamed module; it is
    presumably the training `main()` defined above under a mangled name.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here after the renaming — see above.
    main()
| 82
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case_ (unknown_args ):
    """Pair up leftover CLI tokens as {flag-without-dashes: value}.

    `unknown_args` is a flat sequence like ['--num_proc', '8', ...]; even
    positions are flags, odd positions their values. Leading dashes are
    stripped from each flag.

    NOTE(review): the original parameter was the placeholder `_a` while the
    body read the undefined name `unknown_args`; the parameter is restored.
    """
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def snake_case_ ():
    """Entry point of the `datasets-cli` tool: build the parser, register the
    subcommands, dispatch to the selected command, and run it.

    NOTE(review): mechanically renamed — every assignment rebinds the
    throwaway name `UpperCAmelCase` while later statements read the intended
    names (`parser`, `args`, `service`), and `_a` / `parse_unknown_args` are
    undefined here (the kwargs helper above was renamed to the same
    `snake_case_`, which this def shadows). Comments record the evident
    intent.
    """
    # Intended: parser = ArgumentParser(..., allow_abbrev=False)
    UpperCAmelCase = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a )
    # Intended: commands_parser = parser.add_subparsers(...)
    UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands (each expects the subparsers object, not `_a`)
    ConvertCommand.register_subcommand(_a )
    EnvironmentCommand.register_subcommand(_a )
    TestCommand.register_subcommand(_a )
    RunBeamCommand.register_subcommand(_a )
    DummyDataCommand.register_subcommand(_a )
    # Parse args — intended: args, unknown_args = parser.parse_known_args()
    UpperCAmelCase , UpperCAmelCase = parser.parse_known_args()
    if not hasattr(_a , '''func''' ):
        parser.print_help()
        exit(1 )
    UpperCAmelCase = parse_unknown_args(_a )
    # Run — intended: service = args.func(args, **kwargs); service.run()
    UpperCAmelCase = args.func(_a , **_a )
    service.run()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined after the renaming — see above.
    main()
| 34
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( __a):
    """Processor wrapping a ChineseCLIP image processor and a BERT tokenizer
    (presumably the upstream `ChineseCLIPProcessor` — the base `__a` is
    undefined here and is presumably the imported `ProcessorMixin`; TODO
    confirm).

    NOTE(review): mechanically renamed and broken as written — the three
    class attributes all collapse onto the single name `A_` (only the last
    survives), `__init__`/`__call__` declare duplicate `_SCREAMING_SNAKE_CASE`
    parameters (a SyntaxError), and bodies read names (`kwargs`,
    `image_processor`, `tokenizer`, `text`, `images`, `encoding`,
    `image_features`) that are never bound. Comments record the evident
    intent only.
    """
    A_ : int = ["""image_processor""", """tokenizer"""]
    A_ : Union[str, Any] = """ChineseCLIPImageProcessor"""
    A_ : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        # Intended: accept (image_processor=None, tokenizer=None, **kwargs),
        # honoring the deprecated `feature_extractor` kwarg as a fallback.
        __lowerCAmelCase : int = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , _SCREAMING_SNAKE_CASE , )
            __lowerCAmelCase : int = kwargs.pop('feature_extractor' )
        __lowerCAmelCase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = self.image_processor
    def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        # Intended: (text=None, images=None, return_tensors=None, **kwargs);
        # tokenize text, process images, merge pixel_values into the encoding.
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            __lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        if images is not None:
            __lowerCAmelCase : int = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        if text is not None and images is not None:
            # Intended: encoding["pixel_values"] = image_features.pixel_values
            __lowerCAmelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        # Delegates to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        # Delegates to the tokenizer's decode. NOTE(review): this def reuses
        # the previous method's name and shadows it.
        return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
    @property
    def __lowerCamelCase ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        __lowerCAmelCase : Tuple = self.tokenizer.model_input_names
        __lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def __lowerCamelCase ( self ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
        return self.image_processor_class
| 86
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Build the list of example-script directories, put them on sys.path so the
# run_* scripts import as plain modules, and turn on debug logging.
# NOTE(review): mechanically renamed — the list is bound to `A` but read back
# as `SRC_DIRS`, and the logger is also bound to `A` (rebinding the first).
A =[
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A =logging.getLogger()
def snake_case_ ():
    """Return the value of the ``-f`` argument from the process command line
    (pytest passes the invoking file via ``-f``).

    NOTE(review): the original bound the parser and parsed namespace to the
    throwaway name `UpperCAmelCase` and then read undefined
    `parser`/`args`; restored here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def snake_case_ (output_dir , split="eval" ):
    """Load ``{split}_results.json`` from *output_dir* and return the parsed
    dict.

    Raises:
        ValueError: if the results file does not exist.

    NOTE(review): the original declared two parameters both named `_a`
    (a SyntaxError) and read the never-bound name `path`; restored here.
    """
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
# Mirror log output to stdout so pytest captures it.
# NOTE(review): the handler is bound to `A` but attached as the undefined
# name `stream_handler` — mechanical renaming artifact.
A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
    """End-to-end smoke tests for the Flax example scripts (glue, clm,
    summarization, mlm, t5-mlm, ner, qa): each builds a CLI string, patches
    sys.argv, runs the script's main(), and checks the written results file.

    NOTE(review): mechanically renamed — every test method is named `A` (so
    only the last def survives on the class), results are bound to
    `UpperCAmelCase` but read back via `get_results(lowercase )`, and the
    command strings interpolate `{tmp_dir}`/`{epochs}` that are bound to the
    same throwaway name. The base `__a` is presumably `TestCasePlus` — TODO
    confirm.
    """
    def A ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_glue.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
    @slow
    def A ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_clm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 100 )
    @slow
    def A ( self : str ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_summarization_flax.main()
            UpperCAmelCase = get_results(lowercase , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
    @slow
    def A ( self : int ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 42 )
    @slow
    def A ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_ta_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
    @slow
    def A ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_ner.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
    @slow
    def A ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_qa.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
def UpperCAmelCase_ ( __snake_case ) -> List[str]:
"""simple docstring"""
_lowercase =''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def UpperCAmelCase_ ( key: str ) -> dict:
    """Build a keyword-cipher substitution map from plaintext letters A-Z to
    cipher letters: the deduplicated keyword first, then the remaining
    alphabet letters in order, skipping any letter already used by the key.

    NOTE(review): the original body assigned every value to the throwaway
    name `_lowercase` and then read `key`/`offset`/`cipher_alphabet`/`_a`,
    which were never bound; reconstructed from the surviving comments and
    reads. `remove_duplicates` is the dedup helper defined earlier in this
    module (renamed by the mangling).
    """
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def UpperCAmelCase_ ( message: str , cipher_map: dict ) -> str:
    """Encipher *message* (upper-cased) through *cipher_map*; characters with
    no mapping (digits, punctuation, spaces) pass through unchanged.

    NOTE(review): the original declared both parameters as `__snake_case`
    (a SyntaxError) and annotated the return as `int`; both corrected.
    """
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def UpperCAmelCase_ ( message: str , cipher_map: dict ) -> str:
    """Decipher *message* (upper-cased) by inverting *cipher_map*; characters
    with no reverse mapping pass through unchanged.

    NOTE(review): the original declared both parameters as `__snake_case`
    (a SyntaxError); restored here.
    """
    # Invert the map once; assumes cipher_map values are unique (a valid
    # substitution cipher).
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def UpperCAmelCase_ ( ) -> None:
    """Interactive driver: read a message, keyword, and E/D choice from stdin,
    then encipher or decipher and print the result.

    NOTE(review): `encipher`, `decipher` and `create_cipher_map` are the
    functions defined above, which the mangling renamed to the same
    `UpperCAmelCase_`, so these names are undefined as written.
    """
    _lowercase =input('''Enter message to encode or decode: ''' ).strip()
    _lowercase =input('''Enter keyword: ''' ).strip()
    _lowercase =input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        # Dispatch table: 'e' -> encipher, 'd' -> decipher.
        _lowercase ={'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    _lowercase =create_cipher_map(_a )
    print(func(_a , _a ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is undefined after the renaming — presumably the
    # interactive driver defined above.
    main()
| 5
|
'''simple docstring'''
class _a :
    """Trie node (presumably `TrieNode` upstream): maps characters to child
    nodes and flags whether a complete word ends here.

    NOTE(review): mechanically renamed — every method except `__init__` and
    the delete entry point is named `A`, so earlier defs are shadowed and
    only the last `A` survives on the class; bodies also read names
    (`words`, `word`, `curr`, `char_node`, `char`) bound only as
    `UpperCAmelCase`, and nested `_delete` declares three parameters all
    named `lowercase` (a SyntaxError). Comments record the evident intent.
    """
    def __init__( self : Any ):
        # Intended: self.nodes = {} (char -> TrieNode); self.is_leaf = False.
        '''simple docstring'''
        UpperCAmelCase = {}  # Mapping from char to TrieNode
        UpperCAmelCase = False
    def A ( self : int , lowercase : list[str] ):
        # Intended: insert_many(words) — insert each word.
        '''simple docstring'''
        for word in words:
            self.insert(lowercase )
    def A ( self : Optional[int] , lowercase : str ):
        # Intended: insert(word) — walk/create child nodes, mark final leaf.
        '''simple docstring'''
        UpperCAmelCase = self
        for char in word:
            if char not in curr.nodes:
                UpperCAmelCase = TrieNode()
            UpperCAmelCase = curr.nodes[char]
        UpperCAmelCase = True
    def A ( self : Optional[int] , lowercase : str ):
        # Intended: find(word) — True iff word was inserted as a complete word.
        '''simple docstring'''
        UpperCAmelCase = self
        for char in word:
            if char not in curr.nodes:
                return False
            UpperCAmelCase = curr.nodes[char]
        return curr.is_leaf
    def A ( self : str , lowercase : str ):
        # Intended: delete(word) — recursively unmark/prune via _delete.
        '''simple docstring'''
        def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool:
            # Returns True when the child node can be pruned from its parent.
            if index == len(lowercase ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                UpperCAmelCase = False
                return len(curr.nodes ) == 0
            UpperCAmelCase = word[index]
            UpperCAmelCase = curr.nodes.get(lowercase )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            UpperCAmelCase = _delete(lowercase , lowercase , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , lowercase , 0 )
def snake_case_ (node , word ):
    """Depth-first print of every complete word stored at or below *node*,
    each prefixed by the accumulated *word* and followed by a space (no
    trailing newline).

    NOTE(review): the original declared both parameters as `_a` (a
    SyntaxError) and recursed through the undefined name `print_words`;
    the recursion now goes through this function itself.
    """
    if node.is_leaf:
        print(word , end=''' ''' )
    for key, value in node.nodes.items():
        snake_case_(value , word + key )
def snake_case_ ():
    """Self-test of the trie: insert a word list, then check find/delete
    behavior; returns True when all assertions pass.

    NOTE(review): mechanically renamed — the word list and trie root are
    bound to `UpperCAmelCase` but read back as `words`/`root`, `_a` is
    undefined, and `TrieNode` is the class defined above under the mangled
    name `_a`.
    """
    UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split()
    UpperCAmelCase = TrieNode()
    root.insert_many(_a )
    # print_words(root, "")
    assert all(root.find(_a ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def snake_case_ (msg , passes ):
    """Print *msg* followed by "works!" or "doesn't work :(" depending on
    *passes*.

    NOTE(review): the original declared both parameters as `_a` (a
    SyntaxError) while the body read `passes`; restored here.
    """
    print(str(msg ) , '''works!''' if passes else '''doesn\'t work :(''' )
def snake_case_ ():
    """Assert-style entry: fail loudly if the trie self-test fails.

    NOTE(review): `test_trie` is the self-test above, renamed to the same
    `snake_case_` by the mangling (so this name is undefined as written,
    and this def is itself shadowed by the next one).
    """
    assert test_trie()
def snake_case_ ():
    """Main entry: print the trie self-test result via print_results.

    NOTE(review): `print_results` / `test_trie` are the mangled siblings
    above; undefined under these names as written.
    """
    print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined after the renaming — see above.
    main()
| 34
| 0
|
import os
def lowercase_ ( ):
    """Project Euler 13: sum the numbers listed one per line in ``num.txt``
    (located next to this module) and return the first 10 digits of the sum
    as a string.

    NOTE(review): the original passed the undefined `_a` to
    `os.path.dirname` and `int`; the evident intent (`__file__` / `line`)
    is restored.
    """
    lowerCamelCase__ : str = os.path.join(os.path.dirname(os.path.realpath(__file__ ) ) , "num.txt" )
    with open(lowerCamelCase__ ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
# Print the Euler-13 answer when run as a script. NOTE(review): `solution`
# is undefined after the renaming — presumably the function defined above.
if __name__ == "__main__":
    print(solution())
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( __a ):
    """Configuration class for the Encodec audio codec model (presumably the
    upstream `EncodecConfig`; the base `__a` is presumably the imported
    `PretrainedConfig` — TODO confirm).

    NOTE(review): mechanically renamed and broken as written — `__init__`
    declares every parameter as `lowercase` (a SyntaxError), all attribute
    assignments bind the local `UpperCAmelCase` instead of `self.<attr>`,
    and the four properties are all named `A`, so only the last survives on
    the class. Comments record the evident intent.
    """
    # Intended: model_type = "encodec".
    __a : Union[str, Any] = """encodec"""
    def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ):
        """Store the codec hyperparameters; each assignment below was
        intended as `self.<name> = <name>`."""
        UpperCAmelCase = target_bandwidths
        UpperCAmelCase = sampling_rate
        UpperCAmelCase = audio_channels
        UpperCAmelCase = normalize
        UpperCAmelCase = chunk_length_s
        UpperCAmelCase = overlap
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_filters
        UpperCAmelCase = num_residual_layers
        UpperCAmelCase = upsampling_ratios
        UpperCAmelCase = norm_type
        UpperCAmelCase = kernel_size
        UpperCAmelCase = last_kernel_size
        UpperCAmelCase = residual_kernel_size
        UpperCAmelCase = dilation_growth_rate
        UpperCAmelCase = use_causal_conv
        UpperCAmelCase = pad_mode
        UpperCAmelCase = compress
        UpperCAmelCase = num_lstm_layers
        UpperCAmelCase = trim_right_ratio
        UpperCAmelCase = codebook_size
        # codebook_dim falls back to hidden_size when not given.
        UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
        UpperCAmelCase = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**lowercase )
    @property
    def A ( self : Dict ):
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def A ( self : Union[str, Any] ):
        """Hop/stride between chunks in samples (at least 1), or None."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def A ( self : Any ):
        """Frames per second: sampling_rate over the total upsampling factor.
        NOTE(review): the product is bound to `UpperCAmelCase` but read back
        as the undefined `hop_length`."""
        UpperCAmelCase = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def A ( self : Optional[int] ):
        """Number of quantizers implied by the highest target bandwidth."""
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 34
| 0
|
import numpy as np
def __A ( vector )-> float:
    """Hyperbolic tangent via the identity tanh(x) = 2 / (1 + e^(-2x)) - 1.

    Accepts a scalar or a numpy array (applied elementwise); returns the
    same shape.

    NOTE(review): the original parameter was `__lowerCAmelCase` while the
    body read the undefined name `vector`, and the return annotation said
    `int`; both corrected.
    """
    return (2 / (1 + np.exp(-2 * vector ))) - 1
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 39
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
class _a ( __a ):
    """Image processor (presumably a ResNet/ConvNet-style processor; the base
    `__a` is presumably the imported `BaseImageProcessor` — TODO confirm):
    resize -> center-crop -> rescale -> normalize pipeline, plus a
    post-processing helper for semantic-segmentation logits.

    NOTE(review): mechanically renamed — in `__init__` every configuration
    value is bound to the local `UpperCAmelCase` instead of `self.<attr>`
    (and later methods read `self.do_resize` etc.), and all five transform
    methods plus `preprocess` and the post-process helper are named `A`, so
    only the last def survives on the class. Comments record the evident
    intent.
    """
    __a : str = ["""pixel_values"""]
    def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
        """Store pipeline defaults; each `UpperCAmelCase = ...` below was
        intended as `self.<name> = ...`."""
        super().__init__(**lowercase )
        # Default resize target: shortest edge 256; default crop: 224x224.
        UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
        UpperCAmelCase = do_resize
        UpperCAmelCase = size
        UpperCAmelCase = resample
        UpperCAmelCase = do_center_crop
        UpperCAmelCase = crop_size
        UpperCAmelCase = do_rescale
        UpperCAmelCase = rescale_factor
        UpperCAmelCase = do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
        # Intended: resize(image, size, ...) keeping aspect ratio via the
        # "shortest_edge" key.
        '''simple docstring'''
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
        return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
        # Intended: center_crop(image, size) to size["height"] x size["width"].
        '''simple docstring'''
        UpperCAmelCase = get_size_dict(lowercase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
    def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
        # Intended: rescale(image, scale) — multiply pixel values by scale.
        '''simple docstring'''
        return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
        # Intended: normalize(image, mean, std) channelwise.
        '''simple docstring'''
        return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
        # Intended: preprocess(images, ...) — resolve per-call overrides
        # against stored defaults, validate, then apply the pipeline.
        '''simple docstring'''
        UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase = size if size is not None else self.size
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        UpperCAmelCase = resample if resample is not None else self.resample
        UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
        UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase = image_std if image_std is not None else self.image_std
        UpperCAmelCase = make_list_of_images(lowercase )
        if not valid_images(lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
        if do_resize:
            UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
        if do_center_crop:
            UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
        if do_rescale:
            UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
        if do_normalize:
            UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
        UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
        UpperCAmelCase = {'''pixel_values''': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
    def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
        # Intended: post_process_semantic_segmentation(outputs, target_sizes)
        # — upsample per-image logits to target sizes and argmax to maps.
        '''simple docstring'''
        UpperCAmelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase ) != len(lowercase ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(lowercase ):
                UpperCAmelCase = target_sizes.numpy()
            UpperCAmelCase = []
            for idx in range(len(lowercase ) ):
                UpperCAmelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
                UpperCAmelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase )
        else:
            UpperCAmelCase = logits.argmax(dim=1 )
            UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 34
| 0
|
def lowerCAmelCase_ ( input_string, pattern ):
    """Return True iff ``input_string`` fully matches ``pattern``.

    Supported pattern syntax: ``.`` matches any single character, ``*`` matches
    zero or more of the preceding element.  Solved bottom-up with dynamic
    programming in O(len(input_string) * len(pattern)) time and space.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) and referenced undefined names ``_a``/``input_string``/
    ``pattern``; restored from the DP structure.
    """
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string ):
        for j in range(1, len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # current characters match: inherit the diagonal sub-answer
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' used as "zero occurrences" of the preceding element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence of the preceding element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # NOTE(review): the original bound both constants to the same name
    # ``_snake_case`` (the pattern clobbered the string) and called an undefined
    # ``match_pattern``; the matcher in this file is ``lowerCAmelCase_``.
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 26
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (preds , labels ):
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are expected to be numpy arrays of the same shape.

    NOTE(review): the original declared both parameters as ``_a`` (a SyntaxError)
    while the body used ``preds``/``labels``; the body names are restored.
    """
    return (preds == labels).mean()
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
    """Arguments pertaining to what data the model is trained and evaluated on.

    NOTE(review): in the original all fields were named ``__a`` (shadowing each
    other) and the bool default referenced undefined ``__a``; field names restored
    from their ``data_args.*`` uses in main().  ``overwrite_cache`` default False
    is presumed from the help text — confirm against the upstream script.
    """

    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
    """Train/evaluate a multiple-choice model end-to-end (argument parsing,
    logging setup, model/tokenizer loading, Trainer run, eval-result dump).

    NOTE(review): this file has been mechanically obfuscated — every local is
    bound to ``UpperCAmelCase`` so later assignments clobber earlier ones, and
    several calls reference ``_a``/``p``/``result``/``results`` that are never
    defined here.  The original local names (parser, model_args, data_args,
    training_args, processor, label_list, num_labels, config, tokenizer, model,
    train_dataset, eval_dataset, data_collator, trainer, results, result,
    output_eval_file) must be restored before this can run.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # NOTE(review): ``_a`` is undefined — should be ``training_args``.
    logger.info('''Training/evaluation parameters %s''' , _a )
    # Set seed
    set_seed(training_args.seed )
    try:
        UpperCAmelCase = processors[data_args.task_name]()
        UpperCAmelCase = processor.get_labels()
        UpperCAmelCase = len(_a )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(_a : EvalPrediction ) -> Dict:
        # NOTE(review): the parameter is named ``_a`` but the body uses ``p``.
        UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}

    # Data collator
    UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(_a , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , _a , _a )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(_a )
    return results
def snake_case_ (_a : Optional[int] ):
    """Per-process entry point for xla_spawn (TPUs); the index argument is unused.

    NOTE(review): this redefines ``snake_case_`` (already used for the training
    entry point and the accuracy helper above, which it now shadows) and calls
    ``main``, a name that is never defined in this file — running it raises
    NameError.  The three functions need distinct names restored.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined in this file — the training entry point
    # above is named ``snake_case_`` — so running this script raises NameError.
    main()
| 34
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Dict = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __SCREAMING_SNAKE_CASE (__a ):
    """Configuration class holding the MobileNetV2 architecture hyper-parameters.

    NOTE(review): every ``__init__`` parameter in the original was named ``__a``
    (duplicate argument names are a SyntaxError) and every assignment bound a
    local ``_a`` instead of setting an attribute; parameter/attribute names are
    restored from the assignment order.  The base class name ``__a`` is still
    unresolved at module level — needs fixing upstream.
    """

    # model_type identifier (mangled attribute name kept from the original).
    __a ="""mobilenet_v2"""

    def __init__( self : List[Any] , num_channels : Any=3 , image_size : Tuple=2_24 , depth_multiplier : Dict=1.0 , depth_divisible_by : Union[str, Any]=8 , min_depth : str=8 , expand_ratio : Dict=6 , output_stride : Dict=32 , first_layer_is_expansion : Optional[int]=True , finegrained_output : Any=True , hidden_act : List[Any]="relu6" , tf_padding : Tuple=True , classifier_dropout_prob : Dict=0.8 , initializer_range : int=0.02 , layer_norm_eps : List[Any]=0.001 , semantic_loss_ignore_index : Union[str, Any]=2_55 , **kwargs : List[str] , ):
        super().__init__(**kwargs )

        # A non-positive width multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE (__a ):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): the three members below all share the name ``UpperCamelCase__``,
    so in Python only the LAST definition (the tolerance property) survives on the
    class — the inputs/outputs properties are silently discarded.  They are
    presumably the standard OnnxConfig ``inputs`` / ``outputs`` /
    ``atol_for_validation`` properties and need their names restored.
    """

    # Minimum torch/onnx opset version this export was written against.
    __a =version.parse('1.11' )
    @property
    def UpperCamelCase__ ( self : str ):
        # ONNX input spec: a single dynamic-batch image tensor.
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def UpperCamelCase__ ( self : Dict ):
        # ONNX output spec depends on the export task.
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def UpperCamelCase__ ( self : str ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4
| 63
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
    """Processor wrapping a ChineseCLIP image processor and a BERT tokenizer.

    NOTE(review): in the original, every class attribute was named ``__a`` and
    every method was named ``A`` (each shadowing the previous), ``__init__`` and
    ``__call__`` had duplicate ``lowercase`` parameters (SyntaxError), and the
    ``__call__`` body returned undefined locals.  The standard ProcessorMixin
    attribute/method names are restored below.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ChineseCLIPImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self : Dict , image_processor : Union[str, Any]=None , tokenizer : Dict=None , **kwargs : Optional[Any] ):
        """Accept the deprecated ``feature_extractor`` kwarg as an alias for
        ``image_processor``; both an image processor and a tokenizer are required."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self : Tuple , text : Optional[Any]=None , images : Union[str, Any]=None , return_tensors : int=None , **kwargs : Dict ):
        """Tokenize ``text`` and/or preprocess ``images``; when both are given the
        image pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self : int , *args : Tuple , **kwargs : List[str] ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self : Optional[Any] , *args : int , **kwargs : Optional[int] ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self : Optional[int] ):
        """Union (order-preserving, de-duplicated) of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self : List[Any] ):
        """Deprecated alias kept for backward compatibility."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
| 34
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A =logging.get_logger(__name__)
A ={
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _a ( __a ):
    """Configuration class for Marian machine-translation models.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``lowercase`` (duplicate argument names are a SyntaxError) and the three
    class attributes all shared the name ``__a``; names are restored from the
    attribute-assignment order and the default values.
    """

    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : List[Any] , vocab_size : Union[str, Any]=58_101 , decoder_vocab_size : Tuple=None , max_position_embeddings : str=1_024 , encoder_layers : Optional[int]=12 , encoder_ffn_dim : Optional[int]=4_096 , encoder_attention_heads : int=16 , decoder_layers : List[Any]=12 , decoder_ffn_dim : int=4_096 , decoder_attention_heads : Optional[int]=16 , encoder_layerdrop : int=0.0 , decoder_layerdrop : Tuple=0.0 , use_cache : Tuple=True , is_encoder_decoder : Union[str, Any]=True , activation_function : List[Any]="gelu" , d_model : Tuple=1_024 , dropout : str=0.1 , attention_dropout : str=0.0 , activation_dropout : Optional[int]=0.0 , init_std : Dict=0.02 , decoder_start_token_id : Union[str, Any]=58_100 , scale_embedding : List[str]=False , pad_token_id : str=58_100 , eos_token_id : Any=0 , forced_eos_token_id : Optional[Any]=0 , share_encoder_decoder_embeddings : Tuple=True , **kwargs : Optional[int] , ):
        self.vocab_size = vocab_size
        # Decoder may use its own vocabulary; falls back to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class _a ( __a ):
    """ONNX export configuration for Marian (seq2seq with optional KV-cache inputs).

    NOTE(review): every method in this class is named ``A`` (so only the last
    definition survives on the class) and every parameter is named ``lowercase``
    (duplicate argument names are a SyntaxError).  The original method names —
    inputs/outputs, the dummy-input generators, and the flatten helper — and
    their distinct parameter/local names must be restored before this can run;
    the code is left byte-identical below and only annotated.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def A ( self : int ):
        """ONNX input spec; shape symbols depend on task and KV-cache usage."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With a cache, the decoder consumes one token at a time.
                UpperCAmelCase = {0: '''batch'''}
                UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def A ( self : Any ):
        """ONNX output spec; extends the base outputs with past-KV shapes when cached."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super().outputs
        else:
            UpperCAmelCase = super(lowercase , self ).outputs
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        """Build dummy encoder+decoder inputs (and zeroed past_key_values if cached)."""
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        # Generate decoder inputs
        UpperCAmelCase = seq_length if not self.use_past else 1
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        UpperCAmelCase = dict(**lowercase , **lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = decoder_seq_length + 3
            UpperCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            UpperCAmelCase = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 )
            UpperCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase = min(lowercase , lowercase )
            UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers
            UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(lowercase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                    ) )
            # TODO: test this.
            UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(lowercase , lowercase ):
                common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
        return common_inputs

    def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        """Build dummy causal-LM inputs, extending the attention mask for the cache."""
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            UpperCAmelCase = seqlen + 2
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = common_inputs['''attention_mask'''].dtype
            UpperCAmelCase = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
            UpperCAmelCase = [
                (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
            ]
        return common_inputs

    def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        """Tokenize a fixed dummy sentence at an effective batch/sequence size."""
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase )
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) )
        return common_inputs

    def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        """Dispatch dummy-input generation by task (seq2seq vs causal-lm)."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        else:
            UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        return common_inputs

    def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ):
        """Flatten nested past_key_values, delegating to the task-appropriate base."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
        else:
            UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_(
                lowercase , lowercase , lowercase , lowercase )

    @property
    def A ( self : Any ):
        """Absolute tolerance for validating exported-model outputs."""
        return 1E-4
| 34
| 0
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class A__ :
    """Test helper that builds small MPNet configs/inputs and checks output shapes
    for each model head (base, QA, sequence/token classification, multiple choice).

    NOTE(review): this file has been mechanically obfuscated — every method
    parameter below is named ``A_`` (duplicate argument names are a SyntaxError
    in Python) and every local is bound to ``UpperCamelCase``, so later
    assignments clobber earlier ones.  The original tester's parameter and local
    names must be restored before these methods can run; the code is left
    byte-identical and only annotated.
    """

    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=64 , A_=5 , A_=4 , A_=64 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
        """Store the (small) model hyper-parameters used by the shape checks.

        The attribute assignments below reveal the intended parameter order:
        parent, batch_size, seq_length, is_training, use_input_mask,
        use_token_type_ids, use_labels, vocab_size, hidden_size,
        num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        hidden_dropout_prob, attention_probs_dropout_prob,
        max_position_embeddings, type_vocab_size, type_sequence_label_size,
        initializer_range, num_labels, num_choices, scope.
        """
        UpperCamelCase : Dict = parent
        UpperCamelCase : str = batch_size
        UpperCamelCase : List[str] = seq_length
        UpperCamelCase : Union[str, Any] = is_training
        UpperCamelCase : Any = use_input_mask
        UpperCamelCase : Union[str, Any] = use_token_type_ids
        UpperCamelCase : Any = use_labels
        UpperCamelCase : Optional[Any] = vocab_size
        UpperCamelCase : Optional[Any] = hidden_size
        UpperCamelCase : List[str] = num_hidden_layers
        UpperCamelCase : Optional[int] = num_attention_heads
        UpperCamelCase : Tuple = intermediate_size
        UpperCamelCase : Optional[int] = hidden_act
        UpperCamelCase : Optional[Any] = hidden_dropout_prob
        UpperCamelCase : str = attention_probs_dropout_prob
        UpperCamelCase : Union[str, Any] = max_position_embeddings
        UpperCamelCase : List[Any] = type_vocab_size
        UpperCamelCase : Dict = type_sequence_label_size
        UpperCamelCase : Optional[int] = initializer_range
        UpperCamelCase : Tuple = num_labels
        UpperCamelCase : Union[str, Any] = num_choices
        UpperCamelCase : List[str] = scope

    def __UpperCamelCase( self ):
        """Fetch the reference pretrained config (network access required)."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )

    def __UpperCamelCase( self ):
        """Build random input ids/mask and label tensors plus a small config."""
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : Tuple = None
        if self.use_input_mask:
            UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase : Any = None
        UpperCamelCase : List[str] = None
        UpperCamelCase : Optional[Any] = None
        if self.use_labels:
            UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCamelCase : int = ids_tensor([self.batch_size] , self.num_choices )
        UpperCamelCase : Tuple = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase( self ):
        """Build a small MPNetConfig from the stored hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
        """Check base-model output shapes (last hidden state + pooled output)."""
        UpperCamelCase : Dict = MPNetModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , A_ )
        UpperCamelCase : List[str] = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
        """Check QA head start/end logits shapes."""
        UpperCamelCase : Optional[Any] = MPNetForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[Any] = model(
            A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
        """Check sequence-classification logits shape."""
        UpperCamelCase : List[Any] = self.num_labels
        UpperCamelCase : Tuple = MPNetForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
        """Check multiple-choice logits shape (inputs expanded per choice)."""
        UpperCamelCase : Optional[Any] = self.num_choices
        UpperCamelCase : str = MPNetForMultipleChoice(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCamelCase : List[str] = model(
            A_ , attention_mask=A_ , labels=A_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ ):
        """Check token-classification logits shape."""
        UpperCamelCase : int = self.num_labels
        UpperCamelCase : int = MPNetForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self ):
        """Package config + input ids/mask into the dict common tests expect."""
        UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = config_and_inputs
        UpperCamelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __a , __a , unittest.TestCase ):
    """Common-suite test case wiring the MPNet tester into the shared
    ModelTesterMixin/PipelineTesterMixin machinery.

    NOTE(review): the two ``_UpperCAmelCase`` boolean class attributes at the end
    shadow each other (only the last assignment survives) — presumably the
    original ``test_head_masking`` / ``test_pruning``-style flags; confirm
    against the upstream test file.  The bases ``__a`` also resolve to names
    not defined in this chunk.
    """

    _UpperCAmelCase :Union[str, Any] = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase :Any = (
        {
            """feature-extraction""": MPNetModel,
            """fill-mask""": MPNetForMaskedLM,
            """question-answering""": MPNetForQuestionAnswering,
            """text-classification""": MPNetForSequenceClassification,
            """token-classification""": MPNetForTokenClassification,
            """zero-shot""": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :Optional[Any] = False
    _UpperCAmelCase :Optional[int] = True

    def __UpperCamelCase( self ):
        """Create the model tester and a ConfigTester for MPNetConfig."""
        UpperCamelCase : int = MPNetModelTester(self )
        UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def __UpperCamelCase( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __UpperCamelCase( self ):
        """Shape-check the base model."""
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*A_ )

    def __UpperCamelCase( self ):
        """Shape-check the sequence-classification head."""
        UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*A_ )

    def __UpperCamelCase( self ):
        """Shape-check the multiple-choice head."""
        UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*A_ )

    def __UpperCamelCase( self ):
        """Shape-check the token-classification head."""
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*A_ )

    def __UpperCamelCase( self ):
        """Shape-check the question-answering head."""
        UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*A_ )
@require_torch
class A__ ( unittest.TestCase ):
    """Integration test: run the reference microsoft/mpnet-base checkpoint on a
    fixed input and compare a slice of the output against recorded values."""

    @slow
    def __UpperCamelCase( self ):
        # NOTE(review): the original body referenced undefined ``A_`` where these
        # locals were intended; names restored from the assertions.
        model = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 52
|
'''simple docstring'''
import os
def snake_case_ ():
    """Project Euler 13: return the first ten digits of the sum of the numbers
    stored one per line in ``num.txt`` next to this module.

    Returns:
        str: the first ten decimal digits of the sum.
    """
    # The mangled source passed the undefined placeholder ``_a`` to
    # ``os.path.dirname``/``open``; the data file lives beside this module.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    # The original guard called the undefined name ``solution``; the solver
    # defined above is (mangled to) ``snake_case_``.
    print(snake_case_())
| 34
| 0
|
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the mangled source bound all three objects below to the same
# name ``_UpperCAmelCase`` (so only the last survived) and referenced the
# non-existent ``efficientnet.EfficientNetBa`` for every variant.  The names
# restored here are the ones the conversion functions in this file read
# (``model_classes`` / ``CONFIG_MAP``); the Keras constructors are the
# standard ``EfficientNetB0``..``EfficientNetB7``.
logger = logging.get_logger(__name__)

# Keras model constructor for every EfficientNet variant.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters used to build the HF config, the
# image processor, and the original-model input pipeline.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def __magic_name__ ( lowercase ):
    """Build an ``EfficientNetConfig`` for the variant ``lowercase`` ("b0".."b7"),
    including the ImageNet-1k id<->label mappings.

    NOTE(review): the mangled source assigned every value to a throwaway name
    and passed the undefined placeholder ``_a`` to ``hf_hub_download``/``int``;
    the attribute names below are restored from the upstream HF conversion
    script — confirm against the unmangled source.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[lowercase]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[lowercase]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[lowercase]["depth_coef"]
    config.image_size = CONFIG_MAP[lowercase]["image_size"]
    config.dropout_rate = CONFIG_MAP[lowercase]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[lowercase]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    # Use a context manager so the downloaded label file is closed promptly
    # (the original leaked the file handle via a bare ``open``).
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}

    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def __magic_name__ ( ):
    """Download the standard COCO sanity-check image (two cats on a couch).

    The mangled source passed the undefined placeholder ``_a`` to
    ``requests.get``; the URL is defined on the first line and streaming is
    required to hand the raw response to PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def __magic_name__ ( lowercase ):
    """Create the ``EfficientNetImageProcessor`` matching the variant's input
    resolution."""
    size = CONFIG_MAP[lowercase]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163],
        # NOTE(review): the mangled source passed the undefined placeholder
        # ``_a`` here; the upstream conversion script disables center-cropping
        # — confirm against the unmangled source.
        do_center_crop=False,
    )
    return preprocessor
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =[v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
SCREAMING_SNAKE_CASE_: int =sorted(set(_a ) )
SCREAMING_SNAKE_CASE_: Dict =len(_a )
SCREAMING_SNAKE_CASE_: Union[str, Any] ={b: str(_a ) for b, i in zip(_a , range(_a ) )}
SCREAMING_SNAKE_CASE_: Tuple =[]
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
SCREAMING_SNAKE_CASE_: int =block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
SCREAMING_SNAKE_CASE_: Tuple ={}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE_: Any ="""efficientnet.""" + item[1]
SCREAMING_SNAKE_CASE_: List[str] ="""classifier.weight"""
SCREAMING_SNAKE_CASE_: List[str] ="""classifier.bias"""
return key_mapping
def __magic_name__ ( lowercase , lowercase , lowercase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE_: Optional[Any] =key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE_: Tuple =torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE_: List[Any] =torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE_: str =torch.from_numpy(np.transpose(_a ) )
else:
SCREAMING_SNAKE_CASE_: List[str] =torch.from_numpy(_a )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_a )
@torch.no_grad()
def __magic_name__ ( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """End-to-end conversion: instantiate the original Keras EfficientNet, copy
    its weights into the HF model, verify the logits match, and optionally save
    and/or push the result.

    NOTE(review): the mangled source declared four duplicate parameters named
    ``lowercase`` (a SyntaxError) and used the undefined placeholder ``_a``
    throughout; names restored from the argparse call site and the upstream
    conversion script — confirm against the unmangled source.
    """
    # Instantiate the original Keras model with its ImageNet head.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    # Collect every TF variable (trainable and not) as name -> numpy array.
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    # NOTE(review): the mangled source bound both the parser and the parsed
    # args to the placeholder ``_UpperCAmelCase`` while the code read
    # ``parser``/``args`` (NameErrors), and called the undefined
    # ``convert_efficientnet_checkpoint`` — the conversion entry point defined
    # above is (mangled to) ``__magic_name__``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    __magic_name__(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 173
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# NOTE(review): the mangled source bound every object below to the same name
# ``A``, yet later code reads ``MODEL_MODES`` and ``arg_to_scheduler`` (and the
# ``sorted(arg_to_scheduler.keys())`` line referenced a name that no longer
# existed).  Names restored to the ones the class below actually uses.
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Task name -> Auto model class used to instantiate the backbone.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class _a ( pl.LightningModule ):
    # NOTE(review): mangled chunk — several ``def``s below declare duplicate
    # parameters named ``lowercase`` (a SyntaxError in Python) and most methods
    # share the single name ``A``, so later definitions shadow earlier ones.
    # Code is left byte-identical and only annotated; restoring the original
    # method/parameter names would require the unmangled source.

    def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ):
        """Build config/tokenizer/model from hyper-parameters unless explicit
        instances are supplied.

        NOTE(review): six positional parameters all named ``lowercase`` — a
        SyntaxError; the body reads ``config``/``tokenizer``/``model``/``mode``,
        which were presumably the original names.
        """
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase )
        # NOTE(review): these assignments bind the local ``UpperCAmelCase``,
        # not ``self`` attributes — a mangling artefact.
        UpperCAmelCase = 0
        UpperCAmelCase = Path(self.hparams.output_dir )
        UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , )
        else:
            UpperCAmelCase = config
        # Forward selected hyper-parameters into the model config when present.
        UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase , lowercase ):
                assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , lowercase , getattr(self.hparams , lowercase ) )
        if tokenizer is None:
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , )
        else:
            UpperCAmelCase = tokenizer
        UpperCAmelCase = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , )
        else:
            UpperCAmelCase = model

    def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ):
        """Reload the underlying model from a pretrained checkpoint.

        NOTE(review): result is bound to a local and discarded — mangling
        artefact; presumably it originally assigned ``self.model``.
        """
        UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase )

    def A ( self : Tuple ):
        """Build the LR scheduler selected by ``--lr_scheduler`` and wrap it in
        the dict format pytorch-lightning expects (stepped every batch)."""
        UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def A ( self : str ):
        """Configure the optimizer (Adafactor or AdamW) with weight decay
        disabled for bias/LayerNorm parameters, plus the LR scheduler."""
        UpperCAmelCase = self.model
        UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
        UpperCAmelCase = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            UpperCAmelCase = Adafactor(
                lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase )
        else:
            UpperCAmelCase = AdamW(
                lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        UpperCAmelCase = optimizer
        UpperCAmelCase = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def A ( self : List[Any] , lowercase : int , lowercase : List[str] ):
        """Test step delegates to the validation step (duplicate ``lowercase``
        params: SyntaxError)."""
        return self.validation_step(lowercase , lowercase )

    def A ( self : List[Any] , lowercase : Tuple ):
        """Test-epoch aggregation delegates to the validation aggregation."""
        return self.validation_end(lowercase )

    def A ( self : List[Any] ):
        """Estimate the total number of optimization steps for the scheduler."""
        UpperCAmelCase = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def A ( self : List[str] , lowercase : Any ):
        """Record the dataset size for the given stage (train vs. test)."""
        if stage == "test":
            UpperCAmelCase = len(self.test_dataloader().dataset )
        else:
            UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase )
            UpperCAmelCase = len(self.train_dataloader().dataset )

    def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
        """Abstract dataloader factory — subclasses must implement."""
        raise NotImplementedError('''You must implement this for your task''' )

    def A ( self : Union[str, Any] ):
        """Return the cached training dataloader."""
        return self.train_loader

    def A ( self : Optional[Any] ):
        """Return the validation ("dev") dataloader."""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : List[Any] ):
        """Return the test dataloader."""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : Any , lowercase : Union[str, Any] ):
        """Path of the cached-features file for the given split."""
        return os.path.join(
            self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
                lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def A ( self : List[str] , lowercase : Dict[str, Any] ):
        """On checkpoint save (rank zero only): also export model + tokenizer
        in HF format under ``<output_dir>/best_tfmr``."""
        UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' )
        UpperCAmelCase = self.step_count
        self.model.save_pretrained(lowercase )
        self.tokenizer.save_pretrained(lowercase )

    @staticmethod
    def A ( lowercase : Optional[int] , lowercase : List[str] ):
        """Add model-specific CLI arguments to ``parser`` (duplicate
        ``lowercase`` params: SyntaxError; many defaults/types are the mangled
        placeholder ``lowercase``)."""
        parser.add_argument(
            '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--adafactor''' , action='''store_true''' )
class _a ( pl.Callback ):
    """Callback that initialises the RAG retriever once, on the master worker only."""

    def A ( self , trainer , pl_module ):
        """Lightning hook (original hook name lost to mangling — TODO confirm).

        NOTE(review): the mangled source declared two duplicate parameters
        named ``lowercase`` (a SyntaxError); restored as (trainer, pl_module)
        from the body's references.
        """
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _a ( pl.Callback ):
    """Debug callback: print the name of every RAG parameter whose gradient is None."""

    def A ( self , trainer , pl_module ):
        # NOTE(review): duplicate ``lowercase`` parameters (a SyntaxError)
        # restored as (trainer, pl_module); ``print(lowercase)`` could only
        # have referred to the parameter name being iterated.
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class _a ( pl.Callback ):
    """Logging callback: pushes per-group learning rates to the logger and
    pretty-prints validation/test metrics.

    NOTE(review): all three hook methods were mangled to the same name ``A``
    (only the last survives at runtime) and each declared duplicate
    ``lowercase`` parameters — a SyntaxError.  Parameter and local names are
    restored from the bodies; the original hook names are not recoverable
    from this chunk, so the method names are left as-is.
    """

    def A ( self , trainer , pl_module ):
        # Log the learning rate of every optimizer parameter group.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def A ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def A ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def snake_case_ (parser , root_dir ):
    """Add training-agnostic CLI arguments (output dir, fp16, grad/TPU settings,
    seed, data dir) to ``parser``.

    NOTE(review): the mangled source declared two duplicate parameters named
    ``_a`` (a SyntaxError) and used ``_a`` for every ``type=``/path
    placeholder; types restored from each argument's semantics (float for a
    norm, int for counts, str for paths) — confirm against the unmangled
    source.
    """
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def snake_case_ (
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can also pass an explicit pl logger instance
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a ``pl.Trainer`` from ``args``, wire up the default callbacks, and
    fit ``model`` when ``--do_train`` is set.  Returns the trainer.

    NOTE(review): the mangled source declared eight duplicate parameters named
    ``_a`` (a SyntaxError) and used a shared mutable default ``[]``; names are
    restored from the body's references and the mutable default replaced with
    the ``None`` sentinel (backward compatible).  ``args.fpaa`` is the mangled
    spelling of ``args.fp16`` — confirm against the unmangled source.
    """
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # Avoid mutating a caller-supplied (or shared default) callback list.
    extra_callbacks = [] if extra_callbacks is None else list(extra_callbacks)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
| 34
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCamelCase ( __a ):
    # NOTE(review): mangled chunk — the base class ``__a`` is an undefined
    # placeholder (presumably ``TestCase``), every method is named ``a_``
    # (later defs shadow earlier ones), ``lowerCAmelCase__`` is an undefined
    # placeholder, and tuple unpacks bind the same name ``snake_case_``
    # repeatedly.  Code left byte-identical and only annotated.

    def a_ ( self) -> List[str]:
        """Set-up: build a temp dir, a tiny WordPiece vocab for the Realm
        tokenizer, and an (empty) block-records directory."""
        snake_case_ = tempfile.mkdtemp()
        snake_case_ = 5  # number of block records used by the dummy config
        # Realm tok
        snake_case_ = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        snake_case_ = os.path.join(self.tmpdirname, 'realm_tokenizer')
        # NOTE(review): ``lowerCAmelCase__`` below is undefined — presumably
        # the directory path / ``exist_ok=True`` before mangling.
        os.makedirs(lowerCAmelCase__, exist_ok=lowerCAmelCase__)
        snake_case_ = os.path.join(lowerCAmelCase__, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        snake_case_ = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(lowerCAmelCase__, exist_ok=lowerCAmelCase__)

    def a_ ( self) -> List[str]:
        """Load the RealmTokenizer written by set-up."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))

    def a_ ( self) -> Optional[Any]:
        """Tear-down: remove the temp dir."""
        shutil.rmtree(self.tmpdirname)

    def a_ ( self) -> Tuple:
        """Build a RealmConfig sized to the dummy block records."""
        snake_case_ = RealmConfig(num_block_records=self.num_block_records)
        return config

    def a_ ( self) -> Union[str, Any]:
        """Build a tiny QA dataset fixture."""
        snake_case_ = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            })
        return dataset

    def a_ ( self) -> Union[str, Any]:
        """Build the dummy block-records array (bytes entries)."""
        # NOTE(review): ``dtype=lowerCAmelCase__`` is the mangled placeholder —
        # presumably ``dtype=object`` for a bytes array.
        snake_case_ = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ], dtype=lowerCAmelCase__, )
        return block_records

    def a_ ( self) -> Dict:
        """Assemble a RealmRetriever over the dummy blocks and tokenizer."""
        snake_case_ = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), )
        return retriever

    def a_ ( self) -> List[Any]:
        """Retrieval happy-path: check shapes and detokenized concat inputs."""
        snake_case_ = self.get_config()
        snake_case_ = self.get_dummy_retriever()
        snake_case_ = retriever.tokenizer
        snake_case_ = np.array([0, 3], dtype='long')
        snake_case_ = tokenizer(['Test question']).input_ids
        # NOTE(review): the keyword placeholders below were presumably
        # ``add_special_tokens=False`` etc. before mangling.
        snake_case_ = tokenizer(
            ['the fourth'], add_special_tokens=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, ).input_ids
        snake_case_ = config.reader_seq_len
        # NOTE(review): four results unpacked into the same mangled name.
        snake_case_ , snake_case_ , snake_case_ , snake_case_ = retriever(
            lowerCAmelCase__, lowerCAmelCase__, answer_ids=lowerCAmelCase__, max_length=lowerCAmelCase__, return_tensors='np')
        self.assertEqual(len(lowerCAmelCase__), 2)
        self.assertEqual(len(lowerCAmelCase__), 2)
        self.assertEqual(len(lowerCAmelCase__), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'], )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'], )

    def a_ ( self) -> str:
        """Retrieval with multiple answers: check answer/start/end position lists."""
        snake_case_ = self.get_config()
        snake_case_ = self.get_dummy_retriever()
        snake_case_ = retriever.tokenizer
        snake_case_ = np.array([0, 3, 5], dtype='long')
        snake_case_ = tokenizer(['Test question']).input_ids
        snake_case_ = tokenizer(
            ['the fourth', 'longer longer'], add_special_tokens=lowerCAmelCase__, return_token_type_ids=lowerCAmelCase__, return_attention_mask=lowerCAmelCase__, ).input_ids
        snake_case_ = config.reader_seq_len
        snake_case_ , snake_case_ , snake_case_ , snake_case_ = retriever(
            lowerCAmelCase__, lowerCAmelCase__, answer_ids=lowerCAmelCase__, max_length=lowerCAmelCase__, return_tensors='np')
        self.assertEqual([False, True, True], lowerCAmelCase__)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], lowerCAmelCase__)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], lowerCAmelCase__)

    def a_ ( self) -> Optional[Any]:
        """Save / reload round-trip, locally and through a mocked hub download."""
        snake_case_ = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        # Test local path
        snake_case_ = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            snake_case_ = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME)
            snake_case_ = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
            self.assertEqual(retriever.block_records[0], b'This is the first record')
| 69
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax all edges out of `v` for one direction of bidirectional Dijkstra.

    Updates `cst_fwd`, `parent` and `queue` in place; returns the (possibly
    improved) length of the best forward+backward meeting path seen so far.
    Fixes: the original declared nine parameters all named `_a` (a SyntaxError)
    and discarded every computed value into a throwaway name.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # if the other search already settled nxt, the two frontiers meet here
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def snake_case_(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: shortest distance from `source` to `destination`.

    Returns -1 when no path exists and 0 when source == destination.
    Fixes: duplicate `_a` parameters (SyntaxError), undefined `v_fwd`/`v_bwd`,
    and the call target `pass_and_relaxation` which was renamed away.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        # expand the cheapest node of each frontier in lockstep
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # standard bidirectional stopping criterion
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
# Fixes: both graphs were assigned to the same name `A`, so the forward
# graph was silently shadowed by the backward one.
graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reversed edges of graph_fwd, used by the backward half of the search.
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34
| 0
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
# Module logger; `logger.error`/`logger.warning` below depend on this name.
logger = logging.get_logger(__name__)
def _UpperCAmelCase(model, flax_checkpoint_path):
    """Load a serialized Flax state dict from `flax_checkpoint_path` into the PyTorch `model`.

    Raises OSError for git-lfs pointer files and EnvironmentError when the file
    cannot be deserialized. Fixes: both parameters were named `snake_case`
    (a SyntaxError) and the body referenced undefined `_a`/`model_file`.
    """
    try:
        with open(flax_checkpoint_path, "rb") as flax_state_f:
            # NOTE(review): target=None assumed sufficient for from_bytes here — confirm
            # against the flax.serialization version this file targets.
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(flax_checkpoint_path) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned." )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
    return load_flax_weights_in_pytorch_model(model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy a (nested) Flax params dict into `pt_model`'s state dict and return the model.

    Handles the kernel/weight transpose conventions and the diffusers-style
    `_0` -> `.0` block renaming. Fixes: duplicate `snake_case` parameters
    (SyntaxError), undefined `_a`/`x`, and the mangled dtype names
    (`jnp.bfloataa`/`np.floataa` -> bfloat16/float32). The function name is
    restored because the loader above calls it by this name.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfaa):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    # no base-model prefix handling: keys are matched verbatim
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernels: HWIO (flax) -> OIHW (torch)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernels are stored transposed relative to torch Linear
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            # flax numbers sub-blocks with `_N`; torch modules use `.N`
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys) > 0:
        logger.warning(
            F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            """ use it for predictions and inference.""" )
    return pt_model
| 82
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger (was assigned to the throwaway name `A`).
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build the ASTConfig matching a checkpoint name (strides, labels, id2label).

    Fixes: the parameter was `_a` while the body tested `model_name`, every
    config field assignment was discarded into `UpperCAmelCase`, and the
    id2label comprehension keyed on undefined `_a`. The function name is
    restored because the conversion entry point calls it by this name.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        # speech-commands clips are shorter: 128 frames instead of 1024
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''')
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(name):
    """Map an original AST checkpoint key to its 🤗 transformers equivalent.

    Pure string rewriting; order of the replacements matters (e.g. `attn.proj`
    must be handled before the bare `attn` rule). Fixes: the parameter was
    `_a` while the body read `name`, and every rewrite was discarded.
    """
    if "module.v" in name:
        name = name.replace('''module.v''', '''audio_spectrogram_transformer''')
    if "cls_token" in name:
        name = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "dist_token" in name:
        name = name.replace('''dist_token''', '''embeddings.distillation_token''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''', '''audio_spectrogram_transformer.layernorm''')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''', '''classifier.layernorm''')
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''', '''classifier.dense''')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original AST state dict in place into 🤗 naming, splitting fused qkv.

    Fixes: both parameters were named `_a` (a SyntaxError) and all rewritten
    tensors were discarded instead of being stored back, so non-qkv weights
    vanished entirely.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # fused qkv tensor -> separate query/key/value projections
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head keys (replaced by the 🤗 head) in place.

    Fixes: the parameter was `_a` while the body popped from an undefined
    `state_dict`; the name is restored because the conversion entry point
    calls `remove_keys`.
    """
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        # tolerate checkpoints that lack some of these keys
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to 🤗 format, verify logits, and save/push.

    Fixes: triplicate `_a` parameters (a SyntaxError), every intermediate
    discarded into `UpperCAmelCase`, and the function name restored so the
    CLI entry point below can call it.
    """
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        '''ast-finetuned-audioset-10-10-0.4593''': (
            '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.450''': (
            '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448''': (
            '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-10-10-0.448-v2''': (
            '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-12-12-0.447''': (
            '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-14-14-0.443''': (
            '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
        ),
        '''ast-finetuned-audioset-16-16-0.442''': (
            '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
        ),
        '''ast-finetuned-speech-commands-v2''': (
            '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if '''speech-commands''' not in model_name else -6.845978
    std = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526
    max_length = 1024 if '''speech-commands''' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''', '''v0.02''', split='''validation''')
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''', )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors='''pt''')
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    # per-checkpoint reference logits for the first three classes
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError('''Unknown model name''')
    if not torch.allclose(logits[0, :3], expected_slice, atol=1E-4):
        raise ValueError('''Logits don\'t match''')
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('''Pushing model and feature extractor to the hub...''')
        model.push_to_hub(F"MIT/{model_name}")
        feature_extractor.push_to_hub(F"MIT/{model_name}")
if __name__ == "__main__":
    # CLI entry point. Fixes: the parser was assigned to `A` while the
    # add_argument/parse_args calls referenced the undefined name `parser`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='ast-finetuned-audioset-10-10-0.4593',
        type=str,
        help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    """ConfigTester with Cvt-specific common-property checks."""

    def create_and_test_config_common_properties(self):
        # Cvt configs expose per-stage embed_dim/num_heads lists instead of the
        # usual hidden_size/num_attention_heads scalars.
        # Fixes: the original asserted on the undefined name _SCREAMING_SNAKE_CASE.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))
class CvtModelTester:
    """Builds tiny Cvt configs/inputs and runs shape checks for the model tests.

    Fixes: every __init__ parameter was named _SCREAMING_SNAKE_CASE (a
    SyntaxError from duplicates) and the class name is restored because the
    test class instantiates `CvtModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # replay the per-stage conv downsampling to get the expected spatial size
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-tester suite for Cvt.

    Fixes: mixin bases were mangled to `__a`, the five feature flags were all
    assigned to `A_` (clobbering each other), every test method shared the
    name `__lowerCamelCase` (so only the last survived), and
    check_hidden_states_output declared three identically-named parameters
    (a SyntaxError).
    """

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    # Cvt has no attentions output and does not support these common tests
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below.

    Name restored: the integration test calls `prepare_img()`.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against a pretrained Cvt checkpoint.

    Fixes: the property was named `__lowerCamelCase` although the test reads
    `self.default_image_processor`, and `.to(...)` was called with the
    undefined name _SCREAMING_SNAKE_CASE instead of `torch_device`.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 86
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search: smallest index in v[l..r] whose value is >= key.

    `v` must be ascending on that range. Fixes: four parameters all named
    `_a` (a SyntaxError) while the body read v/l/r/key.
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def snake_case_(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, O(n log n).

    `tail[i]` holds the smallest possible tail of an increasing subsequence of
    length i+1. Fixes: `tail` was never created or updated in the mangled
    version (all writes went to a throwaway name).
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest candidate start
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # replace the ceiling of v[i] to keep tails minimal
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase__(AbstractDatasetReader):
    """Dataset reader backed by a Spark DataFrame.

    Fixes: every __init__ parameter was named `UpperCAmelCase` (a SyntaxError
    from duplicates); real parameter names and the attributes the `read`
    method relies on (`_load_from_cache_file`, `_file_format`, `builder`)
    are restored.
    """

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset; streaming mode skips materialization."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # force a re-download/re-prepare when the cache must not be reused
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 5
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regex-style full match of `input_string` against `pattern`.

    Supports `.` (any single char) and `*` (zero or more of the preceding
    element). Fixes: both parameters were named `_a` (a SyntaxError) and every
    dp cell write was discarded; the name is restored because the demo code
    below calls `match_pattern`.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
# Fixes: both constants were assigned to `A`, while the call and the
# f-strings below reference `input_string`/`pattern`.
input_string = 'aab'
pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
    print(f"""{input_string} matches the given pattern {pattern}""")
else:
    print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# Fixes: logger / MODEL_MODES / arg_to_scheduler were all assigned to `A`
# although later code (lightning module below) reads them by these names.
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# task mode -> AutoModel class used to instantiate the backbone
# NOTE(review): AutoModelForSeqaSeqLM matches this file's (mangled) import
# line — presumably AutoModelForSeq2SeqLM upstream; fix together with imports.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class _lowercase ( pl.LightningModule):
"""simple docstring"""
def __init__(
    self,
    hparams: argparse.Namespace,
    num_labels=None,
    mode="base",
    config=None,
    tokenizer=None,
    model=None,
    **config_kwargs,
):
    '''Initialize config, tokenizer and model from hparams unless provided explicitly.

    Fixes: all parameters were named `__lowerCamelCase` (duplicates are a
    SyntaxError) and every constructed object was discarded into a throwaway
    local instead of being stored on `self`.
    '''
    super().__init__()
    # TODO: move to self.save_hyperparameters()
    # self.save_hyperparameters()
    # can also expand arguments into trainer signature for easier reading
    self.save_hyperparameters(hparams)
    self.step_count = 0
    self.output_dir = Path(self.hparams.output_dir)
    cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
    if config is None:
        self.config = AutoConfig.from_pretrained(
            self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
    else:
        self.config = config
    # forward selected dropout/layerdrop hparams into the model config
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(self.hparams, p, None):
            assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
            setattr(self.config, p, getattr(self.hparams, p))
    if tokenizer is None:
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
    else:
        self.tokenizer = tokenizer
    self.model_type = MODEL_MODES[mode]
    if model is None:
        self.model = self.model_type.from_pretrained(
            self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
    else:
        self.model = model
def lowerCAmelCase ( self : List[Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_type.from_pretrained(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = arg_to_scheduler[self.hparams.lr_scheduler]
lowerCamelCase__ : Tuple = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
lowerCamelCase__ : Union[str, Any] = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : str = self.model
lowerCamelCase__ : Optional[int] = ["bias", "LayerNorm.weight"]
lowerCamelCase__ : str = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
lowerCamelCase__ : Tuple = Adafactor(
__lowerCamelCase , lr=self.hparams.learning_rate , scale_parameter=__lowerCamelCase , relative_step=__lowerCamelCase )
else:
lowerCamelCase__ : Optional[Any] = AdamW(
__lowerCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
lowerCamelCase__ : Optional[int] = optimizer
lowerCamelCase__ : Union[str, Any] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] ):
'''simple docstring'''
return self.validation_step(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple ):
'''simple docstring'''
return self.validation_end(__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
lowerCamelCase__ : Tuple = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Any ):
'''simple docstring'''
if stage == "test":
lowerCamelCase__ : Tuple = len(self.test_dataloader().dataset )
else:
lowerCamelCase__ : str = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = len(self.train_dataloader().dataset )
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : bool = False ):
'''simple docstring'''
raise NotImplementedError("You must implement this for your task" )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.train_loader
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=__lowerCamelCase )
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
__lowerCamelCase , list(filter(__lowerCamelCase , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Dict[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.output_dir.joinpath("best_tfmr" )
lowerCamelCase__ : Any = self.step_count
self.model.save_pretrained(__lowerCamelCase )
self.tokenizer.save_pretrained(__lowerCamelCase )
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
'''simple docstring'''
parser.add_argument(
"--model_name_or_path" , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=__lowerCamelCase , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=__lowerCamelCase , type=__lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(__lowerCamelCase ).parent / "test_run" / "cache" ) , type=__lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=__lowerCamelCase , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=__lowerCamelCase , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=__lowerCamelCase , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=__lowerCamelCase , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5E-5 , type=__lowerCamelCase , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=__lowerCamelCase , metavar=__lowerCamelCase , type=__lowerCamelCase , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=__lowerCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=__lowerCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=__lowerCamelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=__lowerCamelCase , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=__lowerCamelCase )
parser.add_argument("--train_batch_size" , default=32 , type=__lowerCamelCase )
parser.add_argument("--eval_batch_size" , default=32 , type=__lowerCamelCase )
parser.add_argument("--adafactor" , action="store_true" )
class _lowercase ( pl.Callback):
    """Callback that initializes the RAG retriever once on the master worker."""

    # NOTE(review): presumably wired to a pl hook (e.g. `on_sanity_check_start`);
    # the obfuscated method name is kept as-is to avoid guessing the hook.
    def lowerCAmelCase ( self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _lowercase ( pl.Callback):
    """Callback that reports RAG parameters that received no gradient."""

    def lowerCAmelCase ( self , trainer , pl_module ):
        # a parameter with a `None` grad after backward is likely frozen/unused
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class _lowercase ( pl.Callback):
    """Logging callback: per-step learning rates plus validation/test metric dumps."""

    # NOTE(review): hook names restored from the pl.Callback contract; the bodies
    # (LR logging, "Validation results", "Test results") identify each hook.
    def on_batch_end ( self , trainer , pl_module ):
        # log the learning rate of every optimizer param group
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end ( self , trainer , pl_module ):
        rank_zero_info("***** Validation results *****" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )

    def on_test_end ( self , trainer , pl_module ):
        rank_zero_info("***** Test results *****" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
        with open(output_test_results_file , "w" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )
                    writer.write("{} = {}\n".format(key , str(metrics[key] ) ) )
def lowercase_ ( parser , root_dir ):
    """Add generic training-harness arguments to *parser* in place.

    Args:
        parser: ``argparse.ArgumentParser`` to extend.
        root_dir: path of the calling script; default data/output dirs are
            derived relative to it.
    """
    parser.add_argument(
        "--output_dir" , default=str(Path(root_dir ).parent / "test_run" / "model_checkpoints" ) , type=str , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=int )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=float , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=int , default=42 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(root_dir ).parent / "test_run" / "dummy-train-data" ) , type=str , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def lowercase_ ( model , args , early_stopping_callback=None , logger=True , extra_callbacks=None , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """Build a ``pl.Trainer`` for *model* from parsed *args* and optionally fit it.

    Args:
        model: the LightningModule to train.
        args: parsed CLI namespace (seed, fp16, gpus, do_train, ...).
        early_stopping_callback / checkpoint_callback / logging_callback:
            optional callbacks; sensible defaults are created when omitted.
        logger: Lightning logger (or ``True`` for the default).
        extra_callbacks: additional callbacks appended to the trainer.

    Returns:
        The constructed (and possibly fitted) ``pl.Trainer``.
    """
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    if extra_callbacks is None:
        # avoid a shared mutable default argument
        extra_callbacks = []
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        # NOTE(review): LoggingCallback / InitCallback are defined in the
        # original module this fragment came from.
        logging_callback = LoggingCallback()
    train_params = {}
    # NOTE(review): dict keys reconstructed from the Trainer kwargs contract —
    # verify against the original module.
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("RAG modeling tests with new set functions successfuly executed!" )
    return trainer
| 184
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework used by the tokenizer tests (`return_tensors`):
# prefer PyTorch, then TensorFlow, falling back to JAX.
if is_torch_available():
    A ='pt'
elif is_tf_available():
    A ='tf'
else:
    A ='jax'
class _a ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for the byte-level `PerceiverTokenizer`."""

    # attributes read by TokenizerTesterMixin (see self.tokenizer_class /
    # self.test_rust_tokenizer usages below)
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        """Save a fresh tokenizer into the mixin's temp dir."""
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def perceiver_tokenizer( self ):
        """Reference tokenizer loaded from the hub checkpoint."""
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )

    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        """Build a (text, ids) pair of clean ASCII tokens for generic mixin tests."""
        # collect (id, decoded-token) pairs, skipping undecodable byte ids
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids

    def test_multibytes_char( self ):
        """Multi-byte UTF-8 characters round-trip through encode/decode."""
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '[CLS]Unicode €.[SEP]' )
        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '[CLS]e è é ê ë[SEP]' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )

    def test_prepare_batch_integration( self ):
        """Batched call pads correctly and returns the expected framework tensors."""
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        # `A` is the module-level framework flag selected above ('pt'/'tf'/'jax')
        batch = tokenizer(src_text , padding=True , return_tensors=A )
        self.assertIsInstance(batch , BatchEncoding )
        if A != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def test_empty_target_text( self ):
        """No decoder fields are produced when only source text is given."""
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text , padding=True , return_tensors=A )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertNotIn('decoder_input_ids' , batch )
        self.assertNotIn('decoder_attention_mask' , batch )

    def test_max_length_integration( self ):
        """Target texts are padded/truncated to `max_length`."""
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=A )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def test_save_and_load_tokenizer( self ):
        """Saving then reloading preserves encodings, added tokens and max length."""
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        """additional_special_tokens from the saved json files are honoured."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )

    def test_decode_invalid_byte_id( self ):
        """An undecodable byte id decodes to the replacement character."""
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '�' )

    # tokenizer can be instantiated without any pretrained files, so no need
    # for a pretrained tokenizer list check
    def test_pretrained_model_lists( self ):
        pass

    # tokenizer does not have a fixed vocabulary
    def test_get_vocab( self ):
        pass

    # inputs cannot be pretokenized (ids depend on the whole input string)
    def test_pretokenized_inputs( self ):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible( self ):
        pass

    def test_convert_tokens_to_string_format( self ):
        """convert_tokens_to_string returns a plain str."""
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
| 34
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( SchedulerCommonTest):
    """Scheduler tests for `DEISMultistepScheduler`."""

    # attributes read by the SchedulerCommonTest machinery
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config( self , **kwargs ):
        """Default DEIS config; keyword overrides are merged in."""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**kwargs )
        return config

    def check_over_configs( self , time_step=0 , **kwargs ):
        """Saving/loading a scheduler config must not change `step` outputs."""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    # save/load round-trip already covered by `check_over_configs`
    def test_from_save_pretrained( self ):
        pass

    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        """`step` must give identical results before and after save/load."""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def full_loop( self , scheduler=None , **config ):
        """Run 10 denoising steps with the dummy model and return the final sample."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        # NOTE(review): the source rebuilds the scheduler unconditionally here,
        # discarding the one passed in; behavior kept as-is.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample

    def test_step_shape( self ):
        """`step` preserves the sample shape for consecutive timesteps."""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , 'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , 'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )

    def test_switch( self ):
        # make sure that iterating over schedulers with same config names gives
        # same results for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3

    def test_timesteps( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ['logrho']:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='deis' , solver_order=order , solver_type=solver_type , )

    def test_prediction_type( self ):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type )

    def test_solver_order_and_type( self ):
        for algorithm_type in ['deis']:
            for solver_type in ['logrho']:
                for order in [1, 2, 3]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"

    def test_lower_order_final( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )

    def test_inference_steps( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )

    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3

    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3

    def test_fp16_support( self ):
        """The scheduler keeps fp16 samples in fp16 through a full loop."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 39
|
'''simple docstring'''
import os
from distutils.util import strtobool
def snake_case_ (env_keys , default ):
    """Return the first env var in *env_keys* holding a non-negative int.

    Each key missing from the environment is treated as -1 (skipped); when no
    key yields a value >= 0, *default* is returned.
    """
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def snake_case_ (key , default=False ):
    """Parse a boolean flag from the environment (falls back to *default*).

    NOTE(review): `distutils` is removed in Python 3.12 — consider replacing
    `strtobool` with a local helper when upgrading.
    """
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def snake_case_ (_a : str , _a : Optional[Any]="no" ):
UpperCAmelCase = os.environ.get(_a , str(_a ) )
return value
| 34
| 0
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """Builds tiny DistilBert configs/inputs and checks each head's output shapes.

    NOTE(review): the obfuscated source named this class ``lowercase`` with an
    undefined base ``__a``, and every method ``a__`` (so they shadowed one
    another). The real names are restored here — they are grounded by the
    sibling test class below, which calls ``DistilBertModelTester(self)``,
    ``prepare_config_and_inputs``, ``prepare_config_and_inputs_for_common``
    and the ``create_and_check_*`` methods, and by this class's own
    ``self.get_config()`` call.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a tiny config plus random ids/mask/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each sequence num_choices times along a new choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Backward-compatible alias for the original (obfuscated) class name.
lowercase = DistilBertModelTester
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard DistilBert model/pipeline test suite.

    NOTE(review): the obfuscated source inherited from an undefined ``__a``
    twice; the mixins restored here are exactly the two imported at the top of
    the file. Class attributes were all bound to ``_a`` and methods to ``a__``
    (shadowing each other), so they are restored to their conventional names.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): four bare `True` flags in the source; names below follow the
    # upstream DistilBert test file — confirm against it.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


# Backward-compatible alias for the original (obfuscated) class name.
lowercase = DistilBertModelTest
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published distilbert-base-uncased weights.

    NOTE(review): the obfuscated source named this class ``lowercase`` as well,
    which silently shadowed the two test classes above; renamed so all three
    classes survive import.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))


# Backward-compatible alias for the original (obfuscated) class name.
lowercase = DistilBertModelIntegrationTest
| 26
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

# Every one of these module constants was bound to the single name `A` in the
# obfuscated source, which left `BertAbsConfig` undefined at its use site in
# the conversion function below.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
# Backward-compatible alias: `A` ended up bound to the namedtuple originally.
A = BertAbsConfig
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained BertAbs weights into the HF model.

    NOTE(review): the obfuscated source declared duplicate `_a` parameters (a
    SyntaxError) and clobbered every local; body reconstructed. `dump_path` is
    accepted for CLI compatibility but the state dict is saved to the
    hard-coded path below, as in the original script — confirm before reuse.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # Prepare the model inputs, padded to the 512-token context.
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # Failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # Forward pass arguments (segment ids / masks are unused here).
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather
    # in the beam search (where it combines softmax + linear layer). Since we
    # already apply the softmax in our generation process we only apply the
    # linear layer here. We make sure that the outputs of the full stack match.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = convert_bertabs_checkpoints
if __name__ == "__main__":
    # NOTE(review): the obfuscated source bound the parser to `A` and then used
    # the unbound names `parser`/`args` — two NameErrors fixed here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bertabs_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 34
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the DeepFloyd IF inpainting pipeline.

    NOTE(review): the obfuscated source inherited twice from an undefined
    ``__a``; the two mixins restored here are exactly the ones imported above.
    The class attributes (all bound to ``__a``) and the duplicate ``__a``
    parameters of ``get_dummy_inputs`` (a SyntaxError) are restored as well.
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # The obfuscated source called `test_save_load_floataa` — `floataa` is a
        # garbling of `float16` (the attribute does not exist on the mixin).
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)


# Backward-compatible alias for the original (obfuscated) class name.
__SCREAMING_SNAKE_CASE = IFInpaintingPipelineFastTests
| 63
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a(metaclass=DummyObject):
    # Placeholder raising an informative error when flax/transformers is missing.
    # NOTE(review): the original class name was lost in obfuscation (all four
    # placeholders in this file are named `_a`); the metaclass `__a` was
    # undefined — `DummyObject` is imported at the top of the file for exactly
    # this purpose, and `_backends` is the attribute that protocol reads.
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    # Placeholder raising an informative error when flax/transformers is missing.
    # NOTE(review): original class name lost in obfuscation; metaclass restored
    # to the imported `DummyObject`, attribute to `_backends`, and the two
    # classmethods (both named `A`) to the standard dummy-object pair.
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    # Placeholder raising an informative error when flax/transformers is missing.
    # NOTE(review): original class name lost in obfuscation; metaclass restored
    # to the imported `DummyObject`, attribute to `_backends`, and the two
    # classmethods (both named `A`) to the standard dummy-object pair.
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class _a(metaclass=DummyObject):
    # Placeholder raising an informative error when flax/transformers is missing.
    # NOTE(review): original class name lost in obfuscation; metaclass restored
    # to the imported `DummyObject`, attribute to `_backends`, and the two
    # classmethods (both named `A`) to the standard dummy-object pair.
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 34
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # sentencepiece missing: no slow tokenizer to fall back to.
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

# All five module constants below were bound to the single name `__a` in the
# obfuscated source, leaving the names the tokenizer class references undefined.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" PEGASUS tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the obfuscated source named this class ``lowerCamelCase`` with
    an undefined base ``__a``; ``PreTrainedTokenizerFast`` is the only base
    imported for this purpose. Attribute and method names are restored to the
    standard fast-tokenizer API (``_special_token_mask`` is grounded by the
    internal call in ``get_special_tokens_mask``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)


# Backward-compatible alias for the original (obfuscated) class name.
lowerCamelCase = PegasusTokenizerFast
| 66
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras constructor per EfficientNet variant. NOTE(review): the obfuscated
# source had every value garbled to `EfficientNetBa` (digits stripped) and the
# dict bound to `A`; the conversion function below indexes `model_classes`.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant architecture hyperparameters (referenced as CONFIG_MAP below).
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
# Backward-compatible alias: `A` ended up bound to the config map originally.
A = CONFIG_MAP
def get_efficientnet_config(model_name):
    """Build an EfficientNetConfig for *model_name* ("b0".."b7") with ImageNet-1k labels.

    NOTE(review): function name restored from its call site in the conversion
    routine below; the obfuscated body assigned every config attribute to a
    throwaway local — attribute names follow the EfficientNetConfig API,
    confirm against it.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    # Attach the ImageNet-1k label maps from the shared hub dataset.
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = get_efficientnet_config
def prepare_img():
    """Download the standard COCO cats test image used by conversion scripts.

    NOTE(review): name restored from the call sites in the conversion routine;
    the obfuscated body passed an unbound `_a` to requests.get instead of the
    url, and `stream=True` was lost.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = prepare_img
def convert_image_processor(model_name):
    """Create the EfficientNetImageProcessor matching *model_name*'s input size.

    NOTE(review): name restored from its call site; the obfuscated source
    passed an unbound `_a` as `do_center_crop` — set to False here, confirm
    against the upstream conversion script.
    """
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = convert_image_processor
def rename_keys(original_param_names):
    """Map TF EfficientNet parameter names to HF state-dict keys.

    Returns a dict whose keys are TF variable names present in
    *original_param_names* and whose values are the corresponding HF keys
    (prefixed with "efficientnet."), plus the classifier head entries.
    """
    # Block ids look like "block1a_..." -> "1a"; map them to 0-based indices.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    # NOTE(review): the obfuscated tail assigned every entry to one local; the
    # filtering loop and classifier keys below follow the upstream script.
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = rename_keys
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF parameter values into the HF state dict in place.

    Convolution kernels are transposed from TF (H, W, in, out) to PyTorch
    (out, in, H, W) layout; depthwise kernels and dense kernels get their own
    transpositions. Keys containing "normalization" are skipped.

    Fixes: the obfuscated signature declared `_a` three times (a SyntaxError)
    and every intermediate was clobbered into one local.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = replace_params
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original Keras EfficientNet weights into HF structure.

    NOTE(review): name restored from the `__main__` call site; the obfuscated
    body clobbered every local into one name — reconstructed and verified
    against the logit-comparison logic still visible in the source.
    """
    # Instantiate the original Keras model with ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    # Collect all TF parameters (trainable and not) keyed by variable name.
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False  # assumption: the bare `False` in the source was this flag
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)


# Backward-compatible alias for the original (obfuscated) name.
snake_case_ = convert_efficientnet_checkpoint
if __name__ == "__main__":
    # NOTE(review): the obfuscated source bound the parser to `A` then used the
    # unbound names `parser`/`args` — fixed here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34
| 0
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : Optional[Any] = re.compile(r"""\s+""")
def A_ ( example ) -> dict:
    """Return the MD5 hash of the example's content with all whitespace stripped.

    Used for exact-duplicate detection: two files that differ only in
    whitespace hash to the same value.
    """
    # Bug fixes: ``hashlib.mda`` -> ``hashlib.md5``; the undefined ``_a``
    # placeholders are replaced by the whitespace pattern and the parameter.
    stripped = re.sub(r"\s+", "", example["content"])
    return {"hash": hashlib.md5(stripped.encode("utf-8")).hexdigest()}
def A_ ( example ) -> dict:
    """Return the mean and maximum line length of the example's content."""
    # Bug fix: the original measured ``len(_a)`` (undefined) instead of the
    # length of each line.
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def A_ ( example ) -> dict:
    """Return the fraction of alphanumeric characters in the example's content."""
    # Parameter renamed to ``example`` so the body's reference resolves
    # (the obfuscated signature used a different name than the body).
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def A_ ( example , uniques ) -> bool:
    """Return True only for the first occurrence of the example's hash.

    Removes the hash from ``uniques`` (mutated in place) so subsequent
    examples with the same hash are rejected.
    """
    # Bug fix: both parameters were named identically (a SyntaxError);
    # restored the two distinct names the body requires.
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def A_ ( example , scan_width=5 ) -> dict:
    """Flag files whose first ``scan_width`` lines contain an auto-generation marker."""
    # Bug fix: duplicate parameter names (SyntaxError) and undefined ``_a``
    # placeholders restored to ``example``/``scan_width``.
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def A_ ( example , scan_width=5 , coeff=0.05 ) -> dict:
    """Flag configuration or test files.

    Two heuristics: (1) a keyword in the first ``scan_width`` lines;
    (2) more than ``coeff * nlines`` occurrences of "config" or "test".
    """
    # Bug fix: three parameters shared one name (SyntaxError); restored the
    # distinct names the body references.
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: marker keyword near the top of the file
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: density of "config"/"test" tokens relative to file size
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def A_ ( example ) -> dict:
    """Flag files that contain none of the basic Python structure keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def A_ ( example , minimum=4 ) -> dict:
    """Flag files with at most ``minimum`` '=' characters (few assignments)."""
    # Bug fix: duplicate parameter names (SyntaxError) restored to
    # ``example``/``minimum``; early-exits once the threshold is exceeded.
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def A_ ( example ) -> dict:
    """Compute the character-to-token ratio of the example's content.

    Relies on the module-level ``tokenizer`` loaded further down in this
    script (``AutoTokenizer.from_pretrained(args.tokenizer_dir)``).
    """
    # Bug fix: ``truncation=_a`` referenced an undefined name; the original
    # preprocessing script truncates, so pass True explicitly.
    input_ids = tokenizer(example["content"], truncation=True)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def A_ ( example ) -> dict:
    """Run every per-example statistic/heuristic and merge their result dicts."""
    # Bug fix: each helper was called with the undefined placeholder ``_a``;
    # pass the example through instead.
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def A_ ( example , uniques , args ) -> bool:
    """Combined filter rule: True iff the example passes every heuristic.

    ``uniques`` is the set of not-yet-seen hashes (mutated by
    ``check_uniques``); ``args`` carries the threshold configuration.
    """
    # Bug fix: all three parameters shared one name (SyntaxError); restored
    # the distinct names referenced by the body and by ``fn_kwargs`` below.
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def A_ ( file_path ) -> None:
    """Gzip-compress ``file_path`` in place.

    Writes ``<file_path>.gz`` (compresslevel 6) and deletes the original file.
    """
    # Bug fix: the undefined ``_a`` placeholders are replaced by the parameter.
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
# Bug fix: every assignment below previously clobbered a single throwaway
# name while later statements referenced the real variables (args, tokenizer,
# ds, uniques, ds_filter, ...). Restored the names the code actually reads.
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 52
|
"""Download the og:image of a web page and save it to disk, timestamped."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # bug fix: was "from bsa import ...", a typo'd package name

if __name__ == "__main__":
    # Bug fix: every value was assigned to a throwaway name `A` while the
    # following lines referenced url/soup/image_url/... (NameError).
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")

    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 34
| 0
|
"""Lazy import structure for the ALBERT model family.

Builds ``_import_structure`` conditionally on the optional backends that are
installed (sentencepiece, tokenizers, torch, tf, flax) and installs a
``_LazyModule`` so heavy submodules are only imported on first access.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Bug fix: each backend branch previously *reassigned* one throwaway name
# instead of extending _import_structure, and the final _LazyModule was never
# installed into sys.modules. Restored the standard transformers layout.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below serves them on demand.
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 173
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _a ( DiffusionPipeline ):
    """Audio diffusion pipeline: generates mel-spectrogram images and decodes
    them to audio, optionally through a VQ-VAE latent space.

    Fixes over the previous revision: the base class referenced an undefined
    name, and every method declared several parameters with the *same* name
    (a SyntaxError); parameter/local names are restored so the bodies'
    references resolve.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default inference step count: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Run denoising; returns (images, (sample_rate, audios)) or a BaseOutput."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                # NOTE(review): the upstream pipeline writes the noised input
                # into the first batch element; confirm against diffusers.
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)['sample']
            else:
                model_output = self.unet(images, t)['sample']

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )['prev_sample']

            if mask is not None:
                # Re-impose the (noised) original spectrogram in the masked regions.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB').convert('L') for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """DDIM inversion: map spectrogram images back to noise tensors."""
        # Only works with DDIM (deterministic) schedulers
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
| 34
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
    """CLIPProcessor tests: save/load round-trips and joint text+image processing.

    Fixes over the previous revision: all thirteen methods shared the name
    ``a_`` (so only the last survived and unittest discovered nothing),
    ``np.uinta`` was not a real dtype, and the image-processor file name
    referenced an undefined variable instead of IMAGE_PROCESSOR_NAME.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random RGB PIL image (channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_processor_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 69
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A =logging.get_logger(__name__)
A =TypeVar('DatasetType', Dataset, IterableDataset)
def snake_case_ (
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into one.

    Validates that all inputs are of one consistent dataset type, then
    dispatches to the matching ``_interleave_*`` implementation.

    Bug fix: every parameter previously shared the name ``_a`` (a
    SyntaxError); the original signature is restored.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            # The first element fixes the expected dataset type for the rest.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def snake_case_ (
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several map-style or iterable datasets along ``axis``.

    Bug fix: every parameter previously shared the name ``_a`` (a
    SyntaxError); the original signature is restored.
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            # The first element fixes the expected dataset type for the rest.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 34
| 0
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A__ = 2
class __lowerCAmelCase :
    """A mapping from symbols to consecutive integers (port of fairseq's Dictionary).

    Fixes over the previous revision: ``__init__`` declared five keyword-only
    parameters all named ``_snake_case`` (a SyntaxError), and every method was
    named ``snake_case`` although the bodies call ``add_symbol`` /
    ``add_from_file`` / ``_load_meta`` by their real names.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range indices map to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a dictionary from a ``dict.txt``-style file (path or file object)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add ``word`` with count ``n``; returns its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # No header lines in the plain format; subclasses may skip metadata.
        return 0

    def add_from_file(self, f):
        """Load entries of the form ``<token> <count> [#fairseq:overwrite]``."""
        if isinstance(f, str):
            try:
                with open(f, """r""", encoding="""utf-8""") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(""" """, 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(""" """, 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: \'{}\'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected \'<token> <cnt> [flags]\'""")
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict((re.sub(R"""@@$""" , """""" , _a ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , _a ), v) for k, v in d.items() )
_lowerCAmelCase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
_lowerCAmelCase = d[k] # restore
return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    """Convert a fairseq BioGPT checkpoint directory into a HF Transformers dump.

    Reads ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` from
    *biogpt_checkpoint_path* and writes vocab/merges/config/tokenizer-config and
    the remapped model weights into *pytorch_dump_folder_path*.
    (Restored from a scrambled original whose two parameters shared one name.)
    """
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , "checkpoint.pt" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file , map_location="cpu" )
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , "dict.txt" )
    if not os.path.isfile(dict_file ):
        raise ValueError(f'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["vocab_file"] )
    print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
    json_indent = 2  # pretty-print the generated json files
    with open(src_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , "bpecodes" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json" )
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}' )
    with open(biogpt_model_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    # NOTE(review): TOKENIZER_CONFIG_FILE is expected to be imported at the top
    # of this file (from transformers) — confirm.
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 10_24,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f'Generating {biogpt_tokenizer_config_file}' )
    with open(biogpt_tokenizer_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight" ):
            # the LM head keeps its fairseq name minus the "decoder." prefix
            model_state_dict[layer_name.replace("decoder." , "" )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("decoder" , "biogpt" )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    # NOTE(review): WEIGHTS_NAME is expected to be imported at the top of this
    # file (from transformers) — confirm.
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f'Generating {pytorch_weights_dump_path}' )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print("Conversion is done!" )


# Backward-compatible alias for the scrambled original name.
_UpperCAmelCase = convert_biogpt_checkpoint_to_pytorch
# CLI entry point for the BioGPT checkpoint conversion.
# NOTE(review): scrambling broke the local names — the parser is bound to
# ``A__`` but the add_argument calls reference ``parser``, and ``args`` is
# rebound to ``A__`` as well; the original used ``parser``/``args`` throughout.
if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--biogpt_checkpoint_path""",
        default=None,
        type=str,
        required=True,
        help=(
            """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
            """ bpecodes, etc."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    A__ = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 82
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case_ (_a : Tuple ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def snake_case_ ():
    """Entry point of the ``datasets-cli`` tool: build the parser, register
    sub-commands, parse args and run the selected command.

    NOTE(review): scrambling rebinds every local to ``UpperCAmelCase`` and
    references undefined names (``_a`` for the parser/args, ``parse_unknown_args``
    for the helper above) — the original bound ``parser``, ``args``,
    ``unknown_args`` and ``kwargs`` separately; confirm against datasets-cli.
    """
    UpperCAmelCase = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a )
    UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(_a )
    EnvironmentCommand.register_subcommand(_a )
    TestCommand.register_subcommand(_a )
    RunBeamCommand.register_subcommand(_a )
    DummyDataCommand.register_subcommand(_a )
    # Parse args
    UpperCAmelCase , UpperCAmelCase = parser.parse_known_args()
    if not hasattr(_a , '''func''' ):
        # No sub-command selected: show usage and exit non-zero.
        parser.print_help()
        exit(1 )
    UpperCAmelCase = parse_unknown_args(_a )
    # Run
    UpperCAmelCase = args.func(_a , **_a )
    service.run()
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined here — the function above was renamed
    # by the scrambler.
    main()
| 34
| 0
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference script optimized with Intel Extension for PyTorch.
# NOTE(review): scrambling rebinds every intermediate result to the single name
# ``lowerCamelCase__``; the names consumed below (``args``, ``device``,
# ``model_id``, ``pipe``, ``sample``, ``timestep``, ``encoder_hidden_status``,
# ``input_example``, ``seed``, ``generator``, ``generate_kwargs``, ``prompt``,
# ``image``) are therefore undefined — in the original each assignment bound
# the corresponding name.  ``torch.bfloataa`` is presumably ``torch.bfloat16``.
lowerCamelCase__ = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = """cpu"""
lowerCamelCase__ = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
lowerCamelCase__ = """path-to-your-trained-model"""
lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    # Optionally swap in the (faster) DPM-Solver++ multistep scheduler.
    lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__ = pipe.to(device)
# to channels last
lowerCamelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCamelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCamelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    lowerCamelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCamelCase__ = torch.randn(2, 4, 64, 64)
lowerCamelCase__ = torch.rand(1) * 999
lowerCamelCase__ = torch.randn(2, 77, 768)
lowerCamelCase__ = (sample, timestep, encoder_hidden_status)
try:
    # Newer ipex versions accept a sample_input for shape-specialized tracing.
    lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCamelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCamelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    lowerCamelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCamelCase__ = 666
lowerCamelCase__ = torch.Generator(device).manual_seed(seed)
lowerCamelCase__ = {"""generator""": generator}
if args.steps is not None:
    lowerCamelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    lowerCamelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 86
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
A =[
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A =logging.getLogger()
def snake_case_ ():
    """Return the value of the ``-f`` flag from the process's own argv.

    Used so the module can be launched under test runners that inject a ``-f``
    argument.  (Bug fix: the parsed namespace was bound to a scrambled name
    while the return referenced an undefined ``args``.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results (output_dir: str , split: str = "eval" ):
    """Load ``{split}_results.json`` from *output_dir* and return it as a dict.

    Raises ValueError when the file does not exist.  (Restored: the scrambled
    original gave both parameters the same name — a SyntaxError — and the test
    class below calls this helper as ``get_results``.)
    """
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
A =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# End-to-end smoke tests for the flax example scripts on tiny fixtures.
# NOTE(review): scrambling renamed every test method to ``A`` (later defs shadow
# earlier ones), the base ``__a`` is presumably TestCasePlus, and the
# ``lowercase`` names passed to patch.object/get_results (presumably ``sys``,
# ``testargs``, ``tmp_dir``) plus ``result`` are undefined — confirm against the
# original transformers flax examples test file.
class _a ( __a ):
    def A ( self : Any ):
        '''Smoke-test run_flax_glue on tiny MRPC fixtures; checks eval accuracy.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_glue.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
    @slow
    def A ( self : Any ):
        '''Smoke-test run_clm_flax (causal LM) and bound the eval perplexity.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_clm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 100 )
    @slow
    def A ( self : str ):
        '''Smoke-test run_summarization_flax and check ROUGE on the test split.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_summarization_flax.main()
            UpperCAmelCase = get_results(lowercase , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
    @slow
    def A ( self : int ):
        '''Smoke-test run_mlm_flax (masked LM) and bound the eval perplexity.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertLess(result['''eval_perplexity'''] , 42 )
    @slow
    def A ( self : Tuple ):
        '''Smoke-test run_t5_mlm_flax (span-corruption) and check eval accuracy.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_ta_mlm_flax.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
    @slow
    def A ( self : Tuple ):
        '''Smoke-test run_flax_ner on conll fixtures; checks accuracy and F1.'''
        # More epochs on multi-GPU so the tiny run still converges.
        UpperCAmelCase = 7 if get_gpu_count() > 1 else 2
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_flax_ner.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
    @slow
    def A ( self : Union[str, Any] ):
        '''Smoke-test run_qa on SQuAD-v2 fixtures; checks F1 and exact match.'''
        UpperCAmelCase = self.get_auto_remove_tmp_dir()
        UpperCAmelCase = f"\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    ".split()
        with patch.object(lowercase , '''argv''' , lowercase ):
            run_qa.main()
            UpperCAmelCase = get_results(lowercase )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the IBert model (standard transformers pattern).
# NOTE(review): scrambling rebinds ``UpperCAmelCase__`` for both the import
# structure and the torch-only model list (the original extended
# ``_import_structure['modeling_ibert']``), and the ``_import_structure``
# passed to _LazyModule below is therefore undefined — confirm against the
# original transformers ibert __init__.
UpperCAmelCase__ = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
    # The modeling objects are only importable when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''IBertForMaskedLM''',
        '''IBertForMultipleChoice''',
        '''IBertForQuestionAnswering''',
        '''IBertForSequenceClassification''',
        '''IBertForTokenClassification''',
        '''IBertModel''',
        '''IBertPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5
|
'''simple docstring'''
class TrieNode:
    """A node of a prefix tree (trie) over strings.

    Restored from a scrambled original in which all four methods were named
    ``A`` and the loop variables were undefined; the self-test below calls
    ``insert_many``/``find``/``delete`` and constructs ``TrieNode``.
    """

    def __init__( self : Any ):
        '''Create an empty node with no children and no stored word.'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many( self : int , lowercase : list[str] ):
        '''Insert every word in *lowercase* into the trie rooted at self.'''
        for word in lowercase:
            self.insert(word )

    def insert( self : Optional[int] , lowercase : str ):
        '''Insert a single word, creating child nodes along its path.'''
        curr = self
        for char in lowercase:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find( self : Optional[int] , lowercase : str ):
        '''Return True iff *lowercase* was inserted as a complete word.'''
        curr = self
        for char in lowercase:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete( self : str , lowercase : str ):
        '''Remove *lowercase* from the trie, pruning now-empty branches.'''

        def _delete(curr: TrieNode , word: str , index: int ) -> bool:
            # Returns True when *curr*'s child for this path may be pruned.
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr

        _delete(self , lowercase , 0 )


# Backward-compatible alias for the scrambled original class name.
_a = TrieNode
def print_words(node: "TrieNode" , word: str ) -> None:
    """Depth-first print of every word stored under *node*, prefixed by *word*.

    (Restored: the scrambled original gave both parameters the same name —
    a SyntaxError — while its body already recursed via ``print_words``.)
    """
    if node.is_leaf:
        print(word , end=' ' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word list; return True on success.

    (Restored: the scrambled original rebound one name for both the word list
    and the root, and the callers below invoke ``test_trie``.)
    """
    words = '''banana bananas bandana band apple all beast'''.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def print_results(problem: str , passes: bool ) -> None:
    """Print whether *problem* passed or failed its self-test.

    (Restored: the scrambled original gave both parameters the same name —
    a SyntaxError — while its body already read ``passes``.)
    """
    print(str(problem ) , '''works!''' if passes else '''doesn\'t work :(''' )
def pytests() -> None:
    """Run the trie self-test under assert (restored helper name)."""
    assert test_trie()


def main() -> None:
    """Entry point: run the trie self-test and print the outcome.

    (Restored: both helpers were scrambled to ``snake_case_`` while the
    ``__main__`` guard calls ``main``.)
    """
    print_results('''Testing trie functionality''' , test_trie() )


if __name__ == "__main__":
    main()
| 34
| 0
|
def lowercase_ ( _A : int ) -> int:
    """Return the multiplicative persistence of *_A*.

    Repeatedly replace the number by the product of its digits and count the
    steps until a single digit remains.  Raises ValueError for non-integers
    and negatives.  (Bug fix: the body referenced undefined names ``_a`` and
    ``num`` instead of the parameter.)
    """
    if not isinstance(_A , int ):
        raise ValueError("multiplicative_persistence() only accepts integral values" )
    if _A < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values" )
    steps = 0
    num_string = str(_A )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def lowercase_ ( _A : int ) -> int:
    """Return the additive persistence of *_A*.

    Repeatedly replace the number by the sum of its digits and count the steps
    until a single digit remains.  Raises ValueError for non-integers and
    negatives.  (Bug fix: the body referenced undefined names ``_a`` and
    ``num`` instead of the parameter.)
    """
    if not isinstance(_A , int ):
        raise ValueError("additive_persistence() only accepts integral values" )
    if _A < 0:
        raise ValueError("additive_persistence() does not accept negative values" )
    steps = 0
    num_string = str(_A )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 184
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
# Configuration class for the EnCodec neural audio codec.
# NOTE(review): scrambling broke this class — the base ``__a`` is presumably
# PretrainedConfig (imported above), every __init__ parameter shares the name
# ``lowercase`` (a SyntaxError), and the body reads the original parameter
# names (``target_bandwidths`` etc.) that are therefore undefined.  The
# assignment order below matches the real EncodecConfig signature — confirm
# against transformers before relying on it.
class _a ( __a ):
    # HF model-type key; presumably the original attribute is ``model_type``.
    __a : Union[str, Any] = """encodec"""
    def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ):
        '''Store codec hyperparameters (bandwidths, sampling rate, conv stack shape, quantizer size).'''
        UpperCAmelCase = target_bandwidths
        UpperCAmelCase = sampling_rate
        UpperCAmelCase = audio_channels
        UpperCAmelCase = normalize
        UpperCAmelCase = chunk_length_s
        UpperCAmelCase = overlap
        UpperCAmelCase = hidden_size
        UpperCAmelCase = num_filters
        UpperCAmelCase = num_residual_layers
        UpperCAmelCase = upsampling_ratios
        UpperCAmelCase = norm_type
        UpperCAmelCase = kernel_size
        UpperCAmelCase = last_kernel_size
        UpperCAmelCase = residual_kernel_size
        UpperCAmelCase = dilation_growth_rate
        UpperCAmelCase = use_causal_conv
        UpperCAmelCase = pad_mode
        UpperCAmelCase = compress
        UpperCAmelCase = num_lstm_layers
        UpperCAmelCase = trim_right_ratio
        UpperCAmelCase = codebook_size
        # codebook_dim falls back to hidden_size when not given.
        UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
        UpperCAmelCase = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
        super().__init__(**lowercase )
    @property
    def A ( self : Dict ):
        '''Chunk length in samples, or None when chunking is disabled.'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def A ( self : Union[str, Any] ):
        '''Hop (stride) between chunks in samples, derived from the overlap.'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def A ( self : Any ):
        '''Frames per second produced by the encoder (sampling_rate / hop).'''
        UpperCAmelCase = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def A ( self : Optional[int] ):
        '''Number of quantizers needed for the highest target bandwidth.'''
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 34
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __lowerCamelCase ( PretrainedConfig):
    """Configuration for Falcon models.

    Restored from a scrambled original whose __init__ gave every parameter the
    same name (a SyntaxError); the defaults map one-to-one onto the real
    ``FalconConfig`` signature and the body already read the restored names.
    """

    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]

    def __init__( self , vocab_size=6_5024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        """Store Falcon hyperparameters; ``n_embed`` kwarg overrides hidden_size."""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # KV heads default to full multi-head attention.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def head_dim( self ):
        """Per-head hidden dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary( self ):
        """Rotary embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
| 39
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
# Image processor (resize / center-crop / rescale / normalize pipeline) plus a
# semantic-segmentation post-processing helper.
# NOTE(review): scrambling broke this class — the base ``__a`` is presumably
# BaseImageProcessor (imported above), every method is named ``A`` (later defs
# shadow earlier ones), and every parameter shares the name ``lowercase``
# (a SyntaxError); the bodies read the original parameter names.  Confirm
# against the original transformers image-processing module.
class _a ( __a ):
    # Model-input key produced by preprocess().
    __a : str = ["""pixel_values"""]
    def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
        '''Store default transform settings (resize to shortest edge 256, 224x224 crop, 1/255 rescale, ImageNet mean/std).'''
        super().__init__(**lowercase )
        UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
        UpperCAmelCase = do_resize
        UpperCAmelCase = size
        UpperCAmelCase = resample
        UpperCAmelCase = do_center_crop
        UpperCAmelCase = crop_size
        UpperCAmelCase = do_rescale
        UpperCAmelCase = rescale_factor
        UpperCAmelCase = do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
        '''Resize the image so its shortest edge matches size["shortest_edge"], keeping aspect ratio.'''
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
        return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
        '''Center-crop the image to size["height"] x size["width"].'''
        UpperCAmelCase = get_size_dict(lowercase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
    def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
        '''Multiply pixel values by a scale factor (typically 1/255).'''
        return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
        '''Normalize the image with per-channel mean and std.'''
        return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
    def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
        '''Full preprocessing pipeline: validate inputs, then resize/crop/rescale/normalize and batch.'''
        # Per-call overrides fall back to the instance defaults set in __init__.
        UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase = size if size is not None else self.size
        UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
        UpperCAmelCase = resample if resample is not None else self.resample
        UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
        UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase = image_std if image_std is not None else self.image_std
        UpperCAmelCase = make_list_of_images(lowercase )
        if not valid_images(lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
        if do_resize:
            UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
        if do_center_crop:
            UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
        if do_rescale:
            UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
        if do_normalize:
            UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
        UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
        UpperCAmelCase = {'''pixel_values''': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
    def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
        '''Turn model logits into per-image semantic segmentation maps, optionally resized to target sizes.'''
        UpperCAmelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowercase ) != len(lowercase ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(lowercase ):
                UpperCAmelCase = target_sizes.numpy()
            UpperCAmelCase = []
            for idx in range(len(lowercase ) ):
                # Upsample each image's logits to its requested size before argmax.
                UpperCAmelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
                UpperCAmelCase = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowercase )
        else:
            UpperCAmelCase = logits.argmax(dim=1 )
            UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 34
| 0
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
# NOTE(review): scrambled residue of a fast-tokenizer pairing class — the base
# ``__a`` is undefined here (presumably BertTokenizerFast, imported above) and
# the class attribute ``_a`` is presumably ``slow_tokenizer_class``; confirm
# against the original custom-tokenization example.
class lowercase ( __a ):
    # Pairs this fast tokenizer with its slow counterpart.
    _a = CustomTokenizer
    pass
| 26
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (preds , labels ):
    """Return the fraction of predictions equal to their labels (simple accuracy).

    NOTE(review): the damaged original declared both parameters as ``_a`` (a
    SyntaxError) while the body read ``preds``/``labels``; the parameter names
    are restored from the body.
    """
    return (preds == labels).mean()
@dataclass
class _a :
    """Arguments selecting the pretrained model, config and tokenizer.

    NOTE(review): all four fields are annotated under the same name ``__a`` so
    only the last declaration survives, and the ``default=__a`` references are
    unresolved names — mechanical renaming damage; restore the original field
    names (model_name_or_path, config_name, tokenizer_name, cache_dir).
    """
    __a : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __a : Optional[str] = field(
        default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
    """Arguments describing the multiple-choice task and its data.

    NOTE(review): all fields are annotated under the same name ``__a`` (only the
    last survives) and ``default=__a`` is an unresolved name — mechanical
    renaming damage; restore the original field names (task_name, data_dir,
    max_seq_length, overwrite_cache).
    """
    __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    __a : int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    __a : bool = field(
        default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
    """Fine-tune and evaluate a multiple-choice model (HF ``Trainer`` entry point).

    NOTE(review): every local in this function is bound to the literal name
    ``UpperCAmelCase`` while later statements read ``parser``, ``training_args``,
    ``model_args`` etc.; those reads raise NameError at runtime. Also
    ``ModelArguments``/``DataTrainingArguments`` and ``simple_accuracy`` are not
    defined under those names in this module as written. Mechanical renaming
    damage — restore the original local/class names before use.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing, non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , _a )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor and its label set; unknown task names fail fast.
    try:
        UpperCAmelCase = processors[data_args.task_name]()
        UpperCAmelCase = processor.get_labels()
        UpperCAmelCase = len(_a )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    # Metric hook for the Trainer.
    # NOTE(review): reads ``p`` but the parameter is named ``_a`` — restore.
    def compute_metrics(_a : EvalPrediction ) -> Dict:
        UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(_a , p.label_ids )}
    # Data collator
    UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase = Trainer(
        model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase = trainer.evaluate()
        UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(_a , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , _a , _a )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(_a )
    return results
def snake_case_ (_a : Optional[int] ):
    """Spawn entry point for ``xla_spawn.py`` (TPU); each process runs the trainer.

    NOTE(review): ``main`` is not defined in this module (the training entry
    point above is also named ``snake_case_``, which this def shadows) —
    calling this raises NameError; confirm the intended target names.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): ``main`` is not defined in this module — the training entry
    # point above is named ``snake_case_``; confirm the intended target.
    main()
| 34
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__a , __a , unittest.TestCase ):
    """Fast unit tests for ``StableDiffusionSAGPipeline`` using tiny seeded components.

    NOTE(review): all class attributes (and both mixin bases) are bound to the
    single name ``__a`` so earlier assignments are shadowed; one method below
    declares the parameter name ``__a`` twice (a SyntaxError); and method locals
    are bound to ``_a`` while later lines read ``generator``, ``components``
    etc. — mechanical renaming damage throughout; restore the original names.
    """
    __a =StableDiffusionSAGPipeline
    __a =TEXT_TO_IMAGE_PARAMS
    __a =TEXT_TO_IMAGE_BATCH_PARAMS
    __a =TEXT_TO_IMAGE_IMAGE_PARAMS
    __a =TEXT_TO_IMAGE_IMAGE_PARAMS
    __a =False
    def UpperCamelCase__ ( self : Optional[int] ):
        '''Build the dict of tiny, deterministically-seeded pipeline components.'''
        torch.manual_seed(0 )
        _a = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        # NOTE(review): bare ``__a`` in a method does not resolve to the class
        # attribute — likely NameError; presumably these were boolean literals.
        _a = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
        torch.manual_seed(0 )
        _a = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        _a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        _a = CLIPTextModel(__a )
        _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # NOTE(review): the dict reads ``unet``/``scheduler``/... but every local
        # above was bound to ``_a`` — NameError as written.
        _a = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def UpperCamelCase__ ( self : List[str] , __a : Dict , __a : Optional[int]=0 ):
        '''Return deterministic pipeline call kwargs (seeded generator, 2 steps).'''
        if str(__a ).startswith("mps" ):
            _a = torch.manual_seed(__a )
        else:
            _a = torch.Generator(device=__a ).manual_seed(__a )
        _a = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def UpperCamelCase__ ( self : List[str] ):
        '''Batched vs single-sample inference must agree within 3e-3.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Slow GPU integration tests: SAG sampling against real SD checkpoints.

    NOTE(review): locals throughout are bound to ``_a`` while later lines read
    ``sag_pipe``, ``prompt``, ``output``, ``image`` etc., and the bare ``__a``
    arguments are unresolved — mechanical renaming damage; restore the original
    names before running.
    """
    def UpperCamelCase__ ( self : Any ):
        # NOTE(review): calls ``super().tearDown()`` but is not itself named
        # ``tearDown``, so unittest never invokes it — confirm the original name.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase__ ( self : Union[str, Any] ):
        '''SD v1.4: a 512x512 SAG sample must match the reference slice within 5e-2.'''
        _a = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        _a = sag_pipe.to(__a )
        sag_pipe.set_progress_bar_config(disable=__a )
        _a = "."
        _a = torch.manual_seed(0 )
        _a = sag_pipe(
            [prompt] , generator=__a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        _a = output.images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _a = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def UpperCamelCase__ ( self : Dict ):
        '''SD v2.1-base: a 512x512 SAG sample must match the reference slice within 5e-2.'''
        _a = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        _a = sag_pipe.to(__a )
        sag_pipe.set_progress_bar_config(disable=__a )
        _a = "."
        _a = torch.manual_seed(0 )
        _a = sag_pipe(
            [prompt] , generator=__a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        _a = output.images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _a = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def UpperCamelCase__ ( self : Tuple ):
        '''SD v2.1-base: non-square (width 768, height 512) output keeps the requested shape.'''
        _a = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        _a = sag_pipe.to(__a )
        sag_pipe.set_progress_bar_config(disable=__a )
        _a = "."
        _a = torch.manual_seed(0 )
        _a = sag_pipe(
            [prompt] , width=7_68 , height=5_12 , generator=__a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        _a = output.images
        assert image.shape == (1, 5_12, 7_68, 3)
| 63
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
    """Processor bundling a Chinese-CLIP image processor with a BERT tokenizer.

    NOTE(review): ``__init__``, ``__call__`` and the two decode wrappers declare
    the parameter name ``lowercase`` more than once — duplicate argument names
    are a SyntaxError, so this block cannot compile as written. The three class
    attributes are also all bound to ``__a`` (shadowed). Restore the original
    names (attributes/classes, and params like image_processor, tokenizer,
    text, images, return_tensors, kwargs) before use.
    """
    __a : int = ["""image_processor""", """tokenizer"""]
    __a : Union[str, Any] = """ChineseCLIPImageProcessor"""
    __a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
        '''Store the image processor and tokenizer, honouring the deprecated
        ``feature_extractor`` keyword (reads ``kwargs``/``image_processor`` —
        names the damaged signature no longer provides).'''
        UpperCAmelCase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , lowercase , )
            UpperCAmelCase = kwargs.pop('''feature_extractor''' )
        UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowercase , lowercase )
        UpperCAmelCase = self.image_processor
    def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ):
        '''Tokenize ``text`` and/or preprocess ``images``; return the combined encoding.'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
        if images is not None:
            UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
        if text is not None and images is not None:
            # NOTE(review): the pixel values are bound to a discarded local —
            # presumably ``encoding['pixel_values'] = image_features.pixel_values``.
            UpperCAmelCase = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
    def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ):
        '''Forward ``batch_decode`` to the tokenizer.'''
        return self.tokenizer.batch_decode(*lowercase , **lowercase )
    def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ):
        '''Forward ``decode`` to the tokenizer.'''
        return self.tokenizer.decode(*lowercase , **lowercase )
    @property
    def A ( self : Optional[int] ):
        '''Union of tokenizer and image-processor input names, deduplicated in order.'''
        UpperCAmelCase = self.tokenizer.model_input_names
        UpperCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def A ( self : List[Any] ):
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
        return self.image_processor_class
| 34
| 0
|
"""simple docstring"""
class lowerCamelCase :
    """A directed graph stored as an adjacency list, with depth-first traversal.

    NOTE(review): in the damaged original every method was defined under the
    single name ``lowerCAmelCase_`` (each def shadowing the previous) and locals
    were bound to names the bodies never read; the method names below are
    restored from the call sites in the ``__main__`` driver.
    """

    def __init__(self) -> None:
        # Maps each vertex to the list of vertices it has an outgoing edge to.
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one ``u -> v -> w`` line per vertex."""
        print(self.vertex)
        for source in self.vertex:
            print(source, " -> ", " -> ".join(str(dest) for dest in self.vertex[source]))

    def add_edge(self, from_vertex, to_vertex) -> None:
        """Add a directed edge from ``from_vertex`` to ``to_vertex``."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # First edge out of this vertex: create its adjacency list.
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Depth-first search from every unvisited vertex, printing visit order."""
        visited = [False] * len(self.vertex)
        for vertex in range(len(self.vertex)):
            if not visited[vertex]:
                self.dfs_recursive(vertex, visited)

    def dfs_recursive(self, start_vertex, visited) -> None:
        """Visit ``start_vertex`` and recurse into its unvisited neighbours."""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        for neighbour in self.vertex[start_vertex]:
            if not visited[neighbour]:
                self.dfs_recursive(neighbour, visited)
if __name__ == "__main__":
    # Demo driver: build the example graph, print it, then run DFS.
    # NOTE(review): as written, ``Graph`` is undefined (the class above is named
    # ``lowerCamelCase``) and the instance is bound to ``__a`` while the calls
    # below read ``g`` — mechanical renaming damage; restore consistent names.
    __a = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 66
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A =logging.get_logger(__name__)
A ={
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _a ( __a ):
    """Configuration for Marian machine-translation models.

    Stores encoder/decoder sizes, dropout rates and special-token ids; defaults
    correspond to ``Helsinki-NLP/opus-mt-en-de``.

    NOTE(review): names below are restored from the attribute reads in the body —
    the damaged original declared every ``__init__`` parameter as ``lowercase``
    (duplicate argument names are a SyntaxError) and bound every attribute to a
    throwaway local, so no configuration was ever stored. Confirm against the
    upstream MarianConfig.
    """

    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Fall back to the encoder vocab when no separate decoder vocab is given.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _a ( __a ):
    """ONNX export configuration for Marian seq2seq checkpoints.

    NOTE(review): throughout this class locals are bound to the literal name
    ``UpperCAmelCase`` while later statements read ``common_inputs``,
    ``decoder_inputs`` etc. (NameError as written); several method signatures
    declare the parameter name ``lowercase`` more than once (a SyntaxError);
    and every method is named ``A``, so each def shadows the previous one —
    mechanical renaming damage; restore the original names before use.
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def A ( self : int ):
        '''Declare ONNX input names and their dynamic axes for the current task.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # With cached key/values the decoder consumes one new token per step.
                UpperCAmelCase = {0: '''batch'''}
                UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
                UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(lowercase , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def A ( self : Any ):
        '''Declare ONNX output names/axes, extending the base outputs with past k/v.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super().outputs
        else:
            UpperCAmelCase = super(lowercase , self ).outputs
            if self.use_past:
                UpperCAmelCase , UpperCAmelCase = self.num_layers
                for i in range(lowercase ):
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build dummy encoder+decoder inputs (and zero past k/v) for seq2seq export.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        # Generate decoder inputs
        UpperCAmelCase = seq_length if not self.use_past else 1
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        UpperCAmelCase = dict(**lowercase , **lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1]
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = decoder_seq_length + 3
            UpperCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            UpperCAmelCase = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 )
            UpperCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase = min(lowercase , lowercase )
            UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers
            UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(lowercase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                        torch.zeros(lowercase ),
                    ) )
            # TODO: test this.
            UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(lowercase , lowercase ):
                common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
        return common_inputs
    def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Build dummy inputs (and zero past k/v) for the causal-LM export path.'''
        UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase , lowercase , lowercase , lowercase , lowercase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            UpperCAmelCase = seqlen + 2
            UpperCAmelCase , UpperCAmelCase = self.num_layers
            UpperCAmelCase , UpperCAmelCase = self.num_attention_heads
            UpperCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            UpperCAmelCase = common_inputs['''attention_mask'''].dtype
            UpperCAmelCase = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
            UpperCAmelCase = [
                (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
            ]
        return common_inputs
    def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Tokenize a fixed dummy sentence batch sized to the effective axes.'''
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase )
        UpperCAmelCase = compute_effective_axis_dimension(
            lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) )
        return common_inputs
    def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
        '''Dispatch dummy-input generation to the seq2seq or causal-LM builder.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        else:
            UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
        return common_inputs
    def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ):
        '''Flatten past key/values using the seq2seq or causal-LM convention.'''
        if self.task in ["default", "seq2seq-lm"]:
            UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
        else:
            UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_(
                lowercase , lowercase , lowercase , lowercase )
    @property
    def A ( self : Any ):
        '''Absolute tolerance used when validating the exported model outputs.'''
        return 1E-4
| 34
| 0
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__lowerCamelCase : List[Any] = get_logger(__name__)
class A__ ( enum.Enum ):
    """How thoroughly downloaded/generated data should be verified.

    NOTE(review): the damaged original declared all three members under the
    single name ``_UpperCAmelCase`` (``enum.Enum`` raises TypeError on a reused
    key); member names below are restored to the canonical verification modes —
    confirm against callers.
    """
    ALL_CHECKS = """all_checks"""
    BASIC_CHECKS = """basic_checks"""
    NO_CHECKS = """no_checks"""
class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures.

    NOTE(review): the damaged original declared four classes all named ``A__``
    (each shadowing the previous) with an unresolved base ``__a``; the names
    below are restored from the exceptions actually raised by the checksum
    helpers in this module.
    """


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not present in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded/recorded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A recorded checksum differs from the expected one."""
def A_ ( expected_checksums , recorded_checksums , verification_name=None ):
    """Check recorded checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> {"checksum": ...}; ``None`` skips
            verification entirely.
        recorded_checksums: mapping url -> {"checksum": ...} actually observed.
        verification_name: optional label included in error/log messages.

    Raises:
        ExpectedMoreDownloadedFiles: expected urls missing from the record.
        UnexpectedDownloadedFile: recorded urls that were not expected.
        NonMatchingChecksumError: any url whose checksum entries differ.

    NOTE(review): parameter names restored — the damaged original declared all
    three as ``_lowerCAmelCase`` (a SyntaxError) and compared ``set(_a)`` with
    itself, making every check a no-op.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            "Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    """Base error for dataset-split verification failures.

    NOTE(review): the damaged original declared four classes all named ``A__``
    (each shadowing the previous) with an unresolved base ``__a``; the names
    below are restored from the exceptions actually raised by the split
    verification helper in this module.
    """


class UnexpectedSplits(SplitsVerificationException):
    """Recorded splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Expected splits missing from the record."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A recorded split size differs from the expected one."""
def A_ ( expected_splits , recorded_splits ):
    """Check recorded dataset splits against the expected ones.

    Args:
        expected_splits: mapping name -> split info with ``num_examples``;
            ``None`` skips verification entirely.
        recorded_splits: mapping name -> split info actually observed.

    Raises:
        ExpectedMoreSplits: expected split names missing from the record.
        UnexpectedSplits: recorded split names that were not expected.
        NonMatchingSplitsSizesError: any split whose ``num_examples`` differ.

    NOTE(review): parameter names restored — the damaged original declared both
    as ``_lowerCAmelCase`` (a SyntaxError) and compared ``set(_a)`` with itself.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def A_ ( path , record_checksum = True ):
    """Return ``{"num_bytes", "checksum"}`` for the file at ``path``.

    The checksum is the hex digest of the hash constructor aliased as
    ``shaaaa`` (presumably SHA-256 — confirm against the import), computed by
    streaming the file in 1 MiB chunks; it is ``None`` when ``record_checksum``
    is False.

    NOTE(review): the damaged original declared both parameters as
    ``_lowerCAmelCase`` (a SyntaxError) and bound the hasher/chunk to names the
    body never read; restored from the names the body reads.
    """
    if record_checksum:
        m = shaaaa()
        with open(path , "rb" ) as f:
            # Stream in 1 MiB chunks so large files never load fully into memory.
            for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def A_ ( dataset_size ) -> bool:
    """Return True when ``dataset_size`` (bytes) fits under the configured
    in-memory limit; False when the size or the limit is falsy/unset.

    NOTE(review): the damaged original named the parameter ``_lowerCAmelCase``
    while the body read ``dataset_size`` (NameError); the parameter name is
    restored from the body, and the wrong ``-> List[str]`` annotation fixed.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 52
|
'''simple docstring'''
import os
def snake_case_ ():
    """Sum the integers in ``num.txt`` (one per line, located next to this file)
    and return the first ten digits of the total as a string (Project Euler 13).

    NOTE(review): the damaged original read ``os.path.dirname(_a)`` and
    ``int(_a)`` with ``_a`` undefined; restored to ``__file__`` and the loop
    variable, the only reading consistent with the surrounding code.
    """
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
    # Fixed: the damaged original called ``solution()``, which is undefined in
    # this module; the solver above is named ``snake_case_``.
    print(snake_case_())
| 34
| 0
|
"""simple docstring"""
import re
import subprocess
import sys

# CI helper: print the .py files changed since the branch forked from main that
# live under the directories given as CLI arguments (used to scope test runs).
# NOTE(review): the damaged original bound every result to ``_UpperCAmelCase``
# while later lines read ``fork_point_sha``, ``joined_dirs`` etc.; the variable
# names below are restored from those reads.

# Commit where the current branch diverged from main.
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
# Paths touched since the fork point, one per list element.
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
# Restrict to the directories passed on the command line.
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 173
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
# Module-level constants for the Lightning base module.
# NOTE(review): the damaged original bound every value to the single name ``A``
# while the class below reads ``MODEL_MODES`` and ``arg_to_scheduler`` (and the
# last two lines read ``arg_to_scheduler`` / ``arg_to_scheduler_choices``);
# names restored from those reads.
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')

# Map from task mode name to the matching Auto* model class.
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class _a ( pl.LightningModule ):
    def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ):
        '''Configure the Lightning module: resolve config, tokenizer and model.

        NOTE(review): every parameter is declared as ``lowercase`` (duplicate
        argument names are a SyntaxError) and every local is bound to
        ``UpperCAmelCase`` while later lines read ``config``, ``num_labels``,
        ``tokenizer``, ``mode``, ``model`` — this block cannot compile/run as
        written; restore the original parameter and local names.
        '''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase )
        UpperCAmelCase = 0
        UpperCAmelCase = Path(self.hparams.output_dir )
        UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , )
        else:
            UpperCAmelCase = config
        # Propagate any dropout/layerdrop overrides from hparams onto the config.
        UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase , lowercase ):
                assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , lowercase , getattr(self.hparams , lowercase ) )
        if tokenizer is None:
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , )
        else:
            UpperCAmelCase = tokenizer
        UpperCAmelCase = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , )
        else:
            UpperCAmelCase = model
    def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ):
        '''Reload the wrapped Hugging Face model via ``from_pretrained``.

        NOTE(review): ``*lowercase``/``**lowercase`` duplicate the parameter
        name (a SyntaxError), and the loaded model is bound to a discarded
        local — presumably ``self.model = ...``; confirm against the original.
        '''
        UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase )
    def A ( self : Tuple ):
        '''Build the LR scheduler selected by ``hparams.lr_scheduler`` and wrap it
        in the Lightning per-step scheduler dict.

        NOTE(review): locals are bound to ``UpperCAmelCase`` while later lines
        read ``get_schedule_func`` and ``scheduler`` — NameError as written;
        restore the original local names.
        '''
        UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler
def A ( self : str ):
    '''Create the optimizer (AdamW or Adafactor) plus its LR scheduler.

    Weight decay is disabled for biases and LayerNorm weights.

    Fixes: the body referenced ``model``, ``no_decay``, ``optimizer`` and
    ``scheduler`` although all assignments were collapsed to
    ``UpperCAmelCase``; the Adafactor call also passed the undefined name
    ``lowercase`` for its flags.
    '''
    model = self.model
    # Parameters whose names contain these fragments get no weight decay.
    no_decay = ['''bias''', '''LayerNorm.weight''']
    optimizer_grouped_parameters = [
        {
            '''params''': [
                p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ],
            '''weight_decay''': self.hparams.weight_decay,
        },
        {
            '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
            '''weight_decay''': 0.0,
        },
    ]
    if self.hparams.adafactor:
        # NOTE(review): Adafactor with an externally managed LR — upstream
        # passes scale_parameter=False, relative_step=False; the obfuscated
        # code passed the undefined name ``lowercase`` here. Confirm.
        optimizer = Adafactor(
            optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
    else:
        optimizer = AdamW(
            optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
    # get_lr_scheduler() reads the optimizer back from self.opt.
    self.opt = optimizer
    scheduler = self.get_lr_scheduler()
    return [optimizer], [scheduler]
def A ( self : List[Any] , batch : int , batch_idx : List[str] ):
    '''Lightning test step — identical to the validation step.

    Fix: both parameters were previously named ``lowercase`` (SyntaxError);
    renamed to the conventional ``batch``/``batch_idx``.
    '''
    return self.validation_step(batch , batch_idx )
def A ( self : List[Any] , lowercase : Tuple ):
    '''Aggregate test-epoch outputs by reusing the validation aggregation.'''
    outputs = lowercase
    return self.validation_end(outputs )
def A ( self : List[Any] ):
    '''Total number of optimizer steps over the whole training run.

    Fixes: ``num_devices`` and ``effective_batch_size`` were referenced but
    the obfuscated code had bound both to ``UpperCAmelCase`` (NameError).
    '''
    num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
    effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
    return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A ( self : List[str] , lowercase : Any ):
    '''Lightning ``setup`` hook: record the dataset size for ``total_steps``
    (and cache the train dataloader for the "fit" stage).

    Fixes: the comparison read an undefined name ``stage`` instead of the
    ``lowercase`` parameter, and the dataset size / train loader were bound
    to discarded locals instead of ``self.dataset_size``/``self.train_loader``
    (the latter is returned by ``train_dataloader``).
    '''
    if lowercase == "test":
        self.dataset_size = len(self.test_dataloader().dataset )
    else:
        # NOTE(review): the obfuscated call passed the stage string itself as
        # ``shuffle``; training data is conventionally shuffled — confirm.
        self.train_loader = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=True )
        self.dataset_size = len(self.train_dataloader().dataset )
def A ( self : List[str] , type_path : str , batch_size : int , shuffle : bool = False ):
    '''Abstract dataloader factory — task subclasses must override.

    Fix: all three parameters previously shared the name ``lowercase``
    (a SyntaxError); renamed to distinct descriptive names.
    '''
    raise NotImplementedError('''You must implement this for your task''' )
def A ( self : Union[str, Any] ):
    '''Return the train dataloader cached by the ``setup`` hook.'''
    loader = self.train_loader
    return loader
def A ( self : Optional[Any] ):
    '''Validation dataloader over the "dev" split.

    Fix: ``shuffle`` was passed the undefined name ``lowercase`` (NameError);
    evaluation data is not shuffled.
    '''
    return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=False )
def A ( self : List[Any] ):
    '''Test dataloader over the "test" split.

    Fix: ``shuffle`` was passed the undefined name ``lowercase`` (NameError);
    evaluation data is not shuffled.
    '''
    return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=False )
def A ( self : Any , lowercase : Union[str, Any] ):
    '''Path of the cached-features file for a dataset split.

    ``lowercase`` is the split/mode name (e.g. "train"). The second format
    slot is the last non-empty segment of the model name-or-path.

    Fix: ``filter`` was previously given the mode string as its predicate,
    which raises TypeError when iterated; ``filter(None, ...)`` — drop empty
    segments — is the intended idiom.
    '''
    return os.path.join(
        self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
            lowercase , list(filter(None , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A ( self : List[str] , lowercase : Dict[str, Any] ):
    '''On checkpoint save (rank zero only), export model + tokenizer to the
    ``best_tfmr`` directory under the output dir.

    Fixes: the computed save path was discarded and ``save_pretrained`` was
    called on the checkpoint-dict argument instead; the step count was also
    bound to a throwaway local.
    '''
    save_path = self.output_dir.joinpath('''best_tfmr''' )
    # NOTE(review): upstream records the step on the model config
    # (config.save_step); the obfuscated assignment discarded it — confirm.
    self.model.config.save_step = self.step_count
    self.model.save_pretrained(save_path )
    self.tokenizer.save_pretrained(save_path )
@staticmethod
def A ( parser : Optional[int] , root_dir : List[str] ):
    '''Register model/optimization CLI arguments on ``parser``.

    ``root_dir`` anchors the default cache directory.

    Fixes: both parameters were named ``lowercase`` (a SyntaxError), and every
    ``default=``/``type=``/``choices=`` referenced the undefined name
    ``lowercase``; restored to conventional values (str/float/int types,
    None defaults).
    '''
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name''' )
    parser.add_argument(
        '''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument(
        '''--cache_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''cache''' ) , type=str , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
    parser.add_argument(
        '''--encoder_layerdrop''' , type=float , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
    parser.add_argument(
        '''--decoder_layerdrop''' , type=float , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
    parser.add_argument(
        '''--dropout''' , type=float , help='''Dropout probability (Optional). Goes into model.config''' , )
    parser.add_argument(
        '''--attention_dropout''' , type=float , help='''Attention dropout probability (Optional). Goes into model.config''' , )
    parser.add_argument('''--learning_rate''' , default=5E-5 , type=float , help='''The initial learning rate for Adam.''' )
    # NOTE(review): upstream passes choices=arg_to_scheduler_choices and
    # metavar=arg_to_scheduler_metavar; those names are not visible in this
    # chunk, so the choices are derived from the arg_to_scheduler mapping
    # used by get_lr_scheduler — confirm.
    parser.add_argument(
        '''--lr_scheduler''' , default='''linear''' , choices=sorted(arg_to_scheduler ) , metavar=''', '''.join(sorted(arg_to_scheduler ) ) , type=str , help='''Learning rate scheduler''' , )
    parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
    parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=float , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''' )
    parser.add_argument('''--num_workers''' , default=4 , type=int , help='''kwarg passed to DataLoader''' )
    parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=int )
    parser.add_argument('''--train_batch_size''' , default=32 , type=int )
    parser.add_argument('''--eval_batch_size''' , default=32 , type=int )
    parser.add_argument('''--adafactor''' , action='''store_true''' )
class _a ( pl.Callback ):
    '''Callback that initializes the RAG retriever once, on the master worker.'''
    def A ( self : Dict , trainer : Optional[Any] , pl_module : List[Any] ):
        '''Run retriever initialization on global rank zero only.

        Fix: both callback parameters were named ``lowercase`` (SyntaxError).
        '''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class _a ( pl.Callback ):
    '''Debug callback: print the names of RAG parameters that received no gradient.'''
    def A ( self : Optional[int] , trainer : Union[str, Any] , pl_module : Any ):
        '''Scan named parameters and report missing gradients.

        Fixes: both parameters were named ``lowercase`` (SyntaxError); the loop
        read ``pl_module`` which was never bound, and printed the parameter
        (``lowercase``) instead of the offending parameter's name.
        '''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class _a ( pl.Callback ):
    '''Logging callback: log per-group learning rates and dump validation/test metrics.

    NOTE(review): all three hooks are named ``A`` (matching this file's
    obfuscated convention) and therefore shadow one another; upstream they are
    distinct pl.Callback hooks (batch/validation/test end) — confirm.
    '''
    def A ( self : Optional[int] , trainer : Optional[int] , pl_module : Dict ):
        '''Log the current learning rate of every optimizer param group.

        Fixes: both parameters were named ``lowercase`` (SyntaxError); the
        body referenced unbound ``trainer``/``pl_module``/``lr_scheduler`` and
        discarded the computed metrics dict.
        '''
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def A ( self : Tuple , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        '''Print sorted validation metrics on rank zero.'''
        rank_zero_info('''***** Validation results *****''' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
    def A ( self : Dict , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        '''Print sorted test metrics and persist them to ``test_results.txt``.'''
        rank_zero_info('''***** Test results *****''' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(output_test_results_file , '''w''' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def snake_case_ (parser : int , root_dir : Optional[Any] ):
    '''Register generic trainer CLI arguments (fp16, grad clipping, seed, dirs).

    ``root_dir`` anchors the default output and data directories.

    Fixes: both parameters were named ``_a`` (a SyntaxError) and every
    ``type=`` referenced the undefined name ``_a``; restored to str/int/float.
    '''
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''model_checkpoints''' ) , type=str , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O2''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=int )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=float , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=int , default=42 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' , default=str(Path(root_dir ).parent / '''test_run''' / '''dummy-train-data''' ) , type=str , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def snake_case_ (model : BaseTransformer , args : argparse.Namespace , early_stopping_callback : List[Any]=None , logger : Tuple=True , extra_callbacks : int=None , checkpoint_callback : Any=None , logging_callback : int=None , **extra_train_kwargs : Optional[Any] , ):
    '''Build a pl.Trainer for ``model`` and run ``fit`` when ``--do_train`` is set.

    Fixes: every parameter was previously named ``_a`` (a SyntaxError); the
    body referenced unbound names (the output dir, the callbacks, the
    train-params keys were all collapsed into throwaway locals); the mutable
    ``[]`` default for ``extra_callbacks`` is replaced by a None sentinel
    (backward compatible).
    '''
    pl.seed_everything(args.seed )
    extra_callbacks = [] if extra_callbacks is None else extra_callbacks
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        # NOTE(review): LoggingCallback/InitCallback are the callback classes
        # defined earlier in this module (obfuscated to ``_a`` here) — confirm.
        logging_callback = LoggingCallback()
    train_params = {}
    # NOTE(review): the obfuscated source tested ``args.fpaa`` (a mangled
    # ``fp16``) and discarded every train_params entry; restored per the
    # --fp16 flag registered by the generic-args helper.
    if args.fp16:
        train_params['''precision'''] = 16
    if args.gpus > 1:
        train_params['''accelerator'''] = '''auto'''
        train_params['''strategy'''] = '''ddp'''
    train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches
    train_params['''profiler'''] = None
    train_params['''devices'''] = '''auto'''
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
| 34
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
UpperCAmelCase = cst_fwd.get(_a , np.inf )
UpperCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
UpperCAmelCase = new_cost_f
UpperCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def snake_case_ (source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    '''Bidirectional Dijkstra: shortest distance from ``source`` to
    ``destination`` searching from both ends; returns -1 when unreachable.

    Fixes: all four parameters previously shared the name ``_a`` (a
    SyntaxError); the search state (visited sets, cost/parent maps, queues)
    had been collapsed into a single throwaway name while the loop read the
    real names; and the two relaxation calls passed nine copies of ``_a``.
    '''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        # Relax forward from v_fwd, then backward from v_bwd (swapped state).
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        # Standard termination: frontiers' best costs already exceed the answer.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
A ={
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Backward adjacency list (reversed edges) for the backward search.
# NOTE(review): both dicts are bound to the same name ``A`` — the second
# assignment shadows the first, leaving the forward graph unreachable;
# presumably these were distinct names (e.g. graph_fwd / graph_bwd) — confirm.
A ={
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest
    doctest.testmod()
| 34
| 0
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    """Builds a tiny Nystromformer config plus random inputs and checks the
    output shapes of every task head.

    Fixes: the class was bound to the obfuscated name ``__lowerCAmelCase``
    although its caller instantiates ``NystromformerModelTester``; all
    parameters shared the name ``_snake_case`` (a SyntaxError); every method
    was named ``snake_case`` so later definitions silently overwrote earlier
    ones, while callers use the conventional tester method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Defaults mirror the obfuscated original's positional defaults.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask,
        sequence_labels, token_labels, choice_labels) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the obfuscated source passed ``is_decoder=_snake_case``
        # (an unbound name); an encoder-only tester conventionally passes
        # False — confirm.
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Bare model: last_hidden_state must be (batch, seq, hidden)."""
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """MLM head: logits must be (batch, seq, vocab)."""
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """QA head: start/end logits must be (batch, seq)."""
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Multiple-choice head: logits must be (batch, num_choices)."""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input to (batch, num_choices, seq) as the head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common ModelTesterMixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline model tests for Nystromformer.

    Fixes: the base classes were the undefined name ``__a`` (restored to the
    ModelTesterMixin / PipelineTesterMixin imported at the top of the file);
    all class attributes were bound to one name ``__lowerCamelCase`` and all
    methods to ``snake_case``, so each definition silently overwrote the
    previous one; and several bodies read unbound ``_snake_case`` /
    ``config_and_inputs`` names.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the two trailing ``False`` attributes are conventionally
    # test_pruning / test_headmasking for this mixin — confirm.
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released uw-madison/nystromformer-512 weights.

    Fixes: both methods were named ``snake_case`` (the second silently
    replaced the first) and the bodies read unbound names (``model``,
    ``input_ids``, ``output``, ``_snake_case``) because distinct locals were
    collapsed into one throwaway name.
    """

    @slow
    def test_inference_no_head(self):
        """Bare-model forward: shape and a 3x3 logit slice must match the reference."""
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        """Fill-mask end to end: the [MASK] token must decode to "capital"."""
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # Position 2 is the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 82
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
# Show INFO-level progress messages from the conversion steps below.
logging.set_verbosity_info()
# Module-level logger.
# NOTE(review): obfuscated name — upstream binds this to ``logger``.
A =logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name : List[str] ):
    '''Map an AST checkpoint name to a configured ``ASTConfig``.

    Fixes: renamed from the obfuscated ``snake_case_`` to the name the
    conversion entry point actually calls; the body tested ``model_name``
    although the parameter was ``_a`` (NameError); every per-variant config
    field was bound to a throwaway local; and the id2label comprehension
    converted the value instead of the JSON-string key.
    '''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        # NOTE(review): speech-commands checkpoints use 128-frame inputs;
        # upstream sets config.max_length here — confirm the field name.
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    # JSON object keys arrive as strings — convert the keys, not the labels.
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def snake_case_ (_a : Tuple ):
    '''Translate one original AST checkpoint key to the HF naming scheme.

    Applies a fixed sequence of substring replacements (embeddings, encoder
    blocks, final layernorm, classifier head).

    Fix: the body read ``name`` but the parameter is ``_a`` — the obfuscation
    broke the binding, so every call raised NameError; bind it explicitly.
    '''
    name = _a
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
def snake_case_ (_a : Dict , _a : List[Any] ):
    # NOTE(review): both parameters are named ``_a`` — a SyntaxError — and the
    # body reads ``orig_state_dict``/``config``/``key_split``/``dim``/``val``
    # that are never bound (the obfuscation collapsed distinct names into
    # ``UpperCAmelCase``, including the destination dict keys, which were
    # erased entirely). Upstream this is convert_state_dict(orig_state_dict,
    # config): it splits each fused "qkv" tensor into separate query/key/value
    # entries and renames the remaining keys; reconstructing the destination
    # key strings from here would be guesswork, so only documenting.
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase = orig_state_dict.pop(_a )  # popped tensor (upstream: ``val``)
        if "qkv" in key:
            UpperCAmelCase = key.split('''.''' )  # upstream: key_split
            UpperCAmelCase = int(key_split[3] )  # upstream: layer number
            UpperCAmelCase = config.hidden_size  # upstream: dim
            if "weight" in key:
                # First/middle/last ``dim`` rows are the q/k/v weight slices.
                UpperCAmelCase = val[:dim, :]
                UpperCAmelCase = val[dim : dim * 2, :]
                UpperCAmelCase = val[-dim:, :]
            else:
                # Same split for the fused qkv bias.
                UpperCAmelCase = val[:dim]
                UpperCAmelCase = val[dim : dim * 2]
                UpperCAmelCase = val[-dim:]
        else:
            # Upstream: orig_state_dict[rename_key(key)] = val
            UpperCAmelCase = val
    return orig_state_dict
def remove_keys(state_dict : Tuple ):
    '''Drop the original classification-head weights from ``state_dict`` in place.

    Fixes: renamed from the obfuscated ``snake_case_`` to ``remove_keys``,
    the name the conversion entry point actually calls; the ignore list was
    bound to a throwaway local while the loop read ``ignore_keys``; and
    ``pop`` was called on an unbound ``state_dict`` with undefined arguments
    (restored to ``pop(k, None)`` so missing keys are tolerated).
    '''
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to the HF ``ASTForAudioClassification`` format.

    Downloads the original weights, remaps them, verifies the logits on a
    sample audio input against hard-coded expected values, and optionally
    saves the result and/or pushes it to the hub.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")

    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 0
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) in the Lambda-CDM model.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + Ol),
    where the curvature density Ok closes the energy budget to one.

    Raises:
        ValueError: if any input is negative, or any relative density
            (radiation, matter, dark energy) exceeds one.

    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
    ...     matter_density=0.3, dark_energy=0.7, redshift=0)
    68.3
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # Curvature density is whatever is left over after the other components.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation: flat universe with Omega_matter = 0.3
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 86
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ (v: list, l: int, r: int, key) -> int:  # noqa: E741
    """Binary search for the ceil index of ``key`` in sorted list ``v``.

    Returns the smallest index ``i`` in the half-open range ``(l, r]`` such
    that ``v[i] >= key``.  ``l`` must start at an index whose value is below
    ``key`` (conventionally -1).
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def snake_case_ (_a: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of ``_a``.

    Uses the classic O(n log n) "patience" technique: ``tail[k]`` holds the
    smallest possible tail value of an increasing subsequence of length k+1.
    Returns 0 for an empty input.
    """
    if len(_a) == 0:
        return 0
    tail = [0] * len(_a)
    length = 1
    tail[0] = _a[0]
    for i in range(1, len(_a)):
        if _a[i] < tail[0]:
            # New smallest value: it starts a fresh length-1 subsequence.
            tail[0] = _a[i]
        elif _a[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = _a[i]
            length += 1
        else:
            # Replace the first tail value >= _a[i] (binary search for
            # the ceil index in tail[0:length]).
            lo, hi = -1, length - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if tail[mid] >= _a[i]:
                    hi = mid
                else:
                    lo = mid
            tail[hi] = _a[i]
    return length
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
| 34
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
# Checkpoints whose Python and in-graph tokenizers are compared in the tests below.
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
# Tiny BERT checkpoint used to build a lightweight model for SavedModel tests.
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class lowerCamelCase__(tf.keras.Model):
        """Minimal Keras model pairing an in-graph tokenizer with a tiny BERT
        encoder, used to exercise SavedModel round-trips of models that embed
        a TF tokenizer."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # Build a tiny random-weight BERT from its config only (no weights download).
            config = AutoConfig.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            # Tokenize inside the graph, run BERT, expose only the pooled output.
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase__(unittest.TestCase):
    """Checks that the in-graph TFBertTokenizer matches the Python
    BertTokenizer and survives tf.function compilation and SavedModel
    round-trips."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # The TF tokenizer must produce exactly the same ids/masks as the Python one.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Pre-paired tuples and separate text/text_pair lists must agree.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # The tokenizer must behave identically under tf.function compilation.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        # A model embedding the tokenizer must round-trip through SavedModel.
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled,
            # so we need an epsilon for the test.
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 5
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regex-style full match supporting '.' and '*' via dynamic programming.

    '.' matches any single character; '*' matches zero or more of the
    preceding element.  ``dp[i][j]`` is 1 iff the first ``i`` characters of
    ``input_string`` match the first ``j`` characters of ``pattern``.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]

    # since string of zero length matches pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match a non-empty string
    for i in range(1, len_string):
        dp[i][0] = 0
    # the empty string matches patterns whose elements are all erased by '*'
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # bottom-up fill for all remaining prefix lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' matches zero occurrences of its element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' absorbs one more occurrence of its element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # demo inputs; could instead be read interactively:
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # check whether the given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 34
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.