| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Sentinel left in a bucket after deletion so probing keeps scanning."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the item to the bucket; return False if it holds another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 40 |
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into max_num_jobs gen_kwargs, sharding any list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the list values of gen_kwargs; lists of the same length are shuffled identically."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
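# Worked example of how the helpers above compose (added for illustration):
#
#     >>> _distribute_shards(num_shards=5, max_num_jobs=2)
#     [range(0, 3), range(3, 5)]
#     >>> _split_gen_kwargs({"files": ["a", "b", "c"], "sep": ","}, max_num_jobs=3)
#     [{'files': ['a'], 'sep': ','}, {'files': ['b'], 'sep': ','}, {'files': ['c'], 'sep': ','}]
#
# Only lists are treated as shardable data sources; every other value is copied as-is.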
| 676 | 0 |
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit squares
    and tiles of length 2, 3 and 4 (dynamic programming over row lengths)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
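# Sanity checks (added): the triple loop implements the recurrence "place the
# first tile of length t at offset s, count the remainder recursively", so the
# small cases can be verified by hand:
#     solution(1) == 1, solution(2) == 2, solution(3) == 4, solution(4) == 8
# This appears to be the counting scheme of Project Euler problem 117
# (unit squares plus tiles of length 2, 3 and 4).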
| 41 |
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits for small vocabularies, otherwise use 32-bit ints
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 676 | 0 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Stores two signals and performs their circular convolution."""

    def __init__(self) -> None:
        # First signal and second signal are stored as 1-D arrays
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """Perform the circular convolution of the two signals using the matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is a rotation of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
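# Quick check for the default signals (added; computed by hand):
#     y = CircularConvolution().circular_convolution()  # -> [10, 10, 6, 14]
# e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10 for x1=[2,1,2,-1], x2=[1,2,3,4].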
| 42 |
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 676 | 0 |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disable gradient computation for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 43 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 676 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming match of input_string against a regex-like
    pattern supporting '.' (any character) and '*' (zero or more of the previous)."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")

| 44 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial directly, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
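# Note (added): horner computes the same value with one multiply-add per
# coefficient, by rewriting c0 + c1*x + ... + cn*x^n as
# (...((cn*x + c(n-1))*x + ...)*x + c0, avoiding repeated exponentiation.
# For the tuple above both functions print 79800.0:
#     5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800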
| 676 | 0 |
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law F = k * |q1*q2| / d^2 for whichever argument is passed as 0."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
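# Worked example (added): two 1 C charges held 1 m apart.
#     coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#     -> {'force': 8988000000.0}   # F = k * |q1*q2| / d^2 = 8.988e9 N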
if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 45 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 676 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
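# Example (added): flags the chosen subcommand did not declare are forwarded
# to it as keyword arguments, e.g.
#     parse_unknown_args(["--save_infos", "true"]) == {"save_infos": "true"}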
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()

| 46 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git sets `model_inputs["input_ids"] = None` in `preprocess` when `prompt=None`; in batch mode the
        # pipeline collates them into a list of `None`, which would fail below, so normalize it back to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 676 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 |
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 0 |
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
| 48 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
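# Minimal composition sketch (added; uses only the classes defined above):
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["model_type"] == "altclip"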
| 676 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths,
        sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
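# Illustrative note (not part of the test file): the slice assertion fingerprints the
# full (1, 11, 768) output by its top-left 3x3x3 corner; torch.allclose with atol=1e-4
# tolerates tiny numeric drift, e.g.:
#
#   torch.allclose(torch.tensor([1.00004]), torch.tensor([1.0]), atol=1e-4)  # True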
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Least-significant-digit radix sort for non-negative integers."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
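# Illustrative usage (not part of the original file; the sample data is made up).
# LSD radix sort runs in O(d * (n + RADIX)) for n keys of at most d digits:
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]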
| 50 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax weights in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )
    return pt_model
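# Minimal sketch (not part of the original module) of the conv renaming rule used
# above: a Flax conv "kernel" laid out (H, W, C_in, C_out) becomes a PyTorch
# "weight" laid out (C_out, C_in, H, W) via jnp.transpose(..., (3, 2, 0, 1)):
#
#   kernel = np.zeros((3, 3, 4, 8))             # 3x3 conv, 4 in / 8 out channels
#   weight = np.transpose(kernel, (3, 2, 0, 1))
#   weight.shape                                # (8, 4, 3, 3)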
| 676 | 0 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default log level, overridable via the DATASETS_VERBOSITY env variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
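# Hedged usage sketch (not part of the original module): the DATASETS_VERBOSITY
# handling above suggests this is the `datasets` library's logging module, in
# which case callers control verbosity and progress bars roughly like so:
#
#   import datasets
#   datasets.logging.set_verbosity_info()           # root "datasets" logger at INFO
#   logger = datasets.logging.get_logger(__name__)  # child logger under "datasets"
#   datasets.logging.disable_progress_bar()         # tqdm calls become no-op EmptyTqdm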
| 51 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character; the unmatched tail of the longer one is appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # iterate up to the longer of the two lengths
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
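# Expected behaviour of the demo above (illustrative, not part of the original
# file): characters alternate and the leftover tail of the longer string follows.
#
#   alternative_string_arrange("AB", "XYZ")  # -> "AXBYZ"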
| 676 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
def _lowerCamelCase ( self ):
__a : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__a : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__a : int = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__a : Tuple = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self ):
__a , __a : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = 20
__a : List[Any] = model_class_name(_UpperCAmelCase )
__a : int = model.encode(inputs_dict['''input_ids'''] )
__a , __a : List[str] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : str = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__a : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : int = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[int] = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = 20
__a : str = model_class_name(_UpperCAmelCase )
__a : Dict = model.encode(inputs_dict['''input_ids'''] )
__a , __a : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__a : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Tuple = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def _lowerCamelCase ( self ):
__a : Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__a : List[Any] = input_ids.shape[0]
__a : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ):
__a , __a , __a : List[str] = self._get_config_and_data()
__a : List[str] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Tuple = lm_model(input_ids=_UpperCAmelCase )
__a : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__a : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__a : Dict = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__a : Dict = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__a : Dict = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__a : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Tuple = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__a : Union[str, Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
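    # Illustrative note (not part of the original test): shift_tokens_right(input_ids,
    # pad_token_id, decoder_start_token_id) prepends the decoder start token and drops
    # the last position, e.g. with start token 2:
    #
    #   [[71, 82, 18]]  ->  [[2, 71, 82]]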
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
__a : List[Any] = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__a : int = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('''JIT Enabled''' ):
__a : List[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : int = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Tuple = model_class(_UpperCAmelCase )
__a : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__a : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('''JIT Enabled''' ):
__a : Any = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : Optional[int] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 52 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
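# Hedged note (not part of the original file): replacing the module in sys.modules
# with a _LazyModule defers the torch-backed submodule import until an attribute is
# first accessed, so e.g.
#
#   from transformers.models.timm_backbone import TimmBackboneConfig
#
# only triggers the real import of configuration_timm_backbone on that first access.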
| 676 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] using `steps` straight chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100000:
        print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10
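# Sanity check (illustrative, not in the original file): for the straight line
# f(x) = x on [0, 1] the true arc length is sqrt(2), and the chord approximation
# is exact for any step count:
#
#   line_length(lambda x: x, 0, 1, 10)  # ≈ 1.41421356 (sqrt(2))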
| 53 |
'''simple docstring'''
import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """Levenshtein edit distance, computed top-down with memoized recursion."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
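# Classic illustration (not part of the original file):
#
#   min_distance_up_bottom("intention", "execution")  # 5
#   min_distance_up_bottom("", "abc")                 # 3 (three insertions)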
| 676 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from an arbitrary start vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
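# Minimal sketch (not part of the original file) of Graph.prims_algorithm on a
# toy 4-vertex graph; the MST keeps edges (0,1), (1,2), (2,3) of total weight 6:
#
#   g = Graph({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (2, 3): 3, (0, 3): 10})
#   sum(g.prims_algorithm().edges.values())  # 6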
| 54 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself, e.g. 5 -> 25."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
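# Illustrative values (not part of the original file): 5**2 = 25 and 76**2 = 5776
# end in the number itself, while 7**2 = 49 does not.
#
#   is_automorphic_number(5)   # True
#   is_automorphic_number(76)  # True
#   is_automorphic_number(7)   # False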
| 676 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
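# Hedged usage sketch (the alias names restored above are inferred from the
# `datasets` library and may differ from the original source):
#
#   def save_to(path: PathLike) -> None: ...       # accepts str, bytes or os.PathLike
#   def first(items: ListLike[int]) -> int: ...    # accepts list[int] or tuple[int, ...]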
| 55 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` can be placed at (row, column) without breaking sudoku rules."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of an empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
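# Complexity note with a tiny check (not part of the original file): the solver
# backtracks over empty cells trying digits 1-9, so the worst case is O(9^m) for
# m empty cells; a grid that comes back solved has no empty location left:
#
#   solved = sudoku(initial_grid)
#   assert solved is None or find_empty_location(solved) is None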
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
_a : List[str] = (0, 0)
_a : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_a : int = GreedyBestFirst(init, goal)
_a : Optional[Any] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_a : Union[str, Any] = 2
for elem in grid:
print(elem)
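# Illustrative check of the Manhattan heuristic used above (values assumed,
# not taken from the demo run): a node at (x=2, y=3) aiming at (x=6, y=6)
# scores |2 - 6| + |3 - 6| = 7, and greedy best-first expands nodes purely
# by this score, ignoring g_cost, so the returned path need not be optimal.
if __name__ == "__main__":
    example_node = Node(2, 3, 6, 6, 0, None)
    assert example_node.calculate_heuristic() == 7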
| 56 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
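# Worked example of the derived sizes above, using the defaults (for
# illustration): cardinality falls back to [0], so embedding_dimension becomes
# [min(50, (0 + 1) // 2)] = [0], and therefore
#     _number_of_features = sum([0]) + 0 + 0 + 0 + 1 * 2 = 2
#     feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9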
| 676 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports) | 57 |
"""
Find the prime below a given ceiling that can be written as the sum of the
most consecutive primes (Project Euler problem 50).
"""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only check odd numbers
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 676 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = IFPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> List[str]:
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
snake_case_ : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
snake_case_ : Optional[int] = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_lowercase , tokenizer=_lowercase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
snake_case_ , snake_case_ : int = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case_ : Union[str, Any] = None
snake_case_ : int = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
snake_case_ : int = IFImgaImgPipeline(**pipe_a.components )
snake_case_ : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_lowercase , _lowercase , _lowercase , _lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case_ : int = IFInpaintingPipeline(**pipe_a.components )
snake_case_ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_lowercase , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : str = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Union[str, Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : List[str] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Union[str, Any] = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Optional[int] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
snake_case_ : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(_lowercase )
snake_case_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Any = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
# pipeline 2
_start_torch_memory_measurement()
snake_case_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case_ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : Dict = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_lowercase )
snake_case_ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(_lowercase )
snake_case_ : int = pipe_a(
prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , )
snake_case_ : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_lowercase , _lowercase )
def __lowerCAmelCase ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58 |
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
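# Hedged usage sketch (the checkpoint id is an assumption for illustration;
# any unconditional UNet checkpoint with a compatible scheduler should work):
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")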
| 676 | 0 |
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
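# Illustrative lookups in the table above: each entry maps a bare package
# name to the pinned requirement string used for version checks, e.g.
#     deps["torch"]  ->  "torch>=1.9,!=1.12.0"
#     deps["tqdm"]   ->  "tqdm>=4.27"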
| 59 |
"""
A twin prime pair differs by two, e.g. (3, 5) or (11, 13).
"""
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) is a twin prime pair, else -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
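# Quick examples of the behaviour above: (3, 5) and (5, 7) are twin prime
# pairs, while 4 is not prime at all.
if __name__ == "__main__":
    assert twin_prime(3) == 5
    assert twin_prime(5) == 7
    assert twin_prime(4) == -1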
| 676 | 0 |
import math
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as one long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses the given data_bits using the Lempel-Ziv-Welch algorithm
    and returns the result as a string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the code length grows: prefix every existing key with a "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given to_write string (which should only consist of 0's and 1's)
    as bytes into the file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that a compressed file carries.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
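# A worked example of the bit-string plumbing above (assumed values): a file
# containing the single byte 0x41 ("A") is read by read_file_binary as the
# eight bits f"{65:08b}" == "01000001", and write_file_binary packs such
# strings back via int("01000001", 2).to_bytes(1, byteorder="big") == b"A".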
| 60 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample one discretisation step along the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: one Langevin-dynamics correction of the current sample."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
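# Worked form of the predictor update in step_pred above, with assumed scalar
# sigmas for illustration: if sigma_t = 0.5 and the adjacent sigma_{t-1} = 0.3,
#     diffusion = (0.5**2 - 0.3**2) ** 0.5 = 0.16 ** 0.5 = 0.4
#     prev_sample_mean = sample + diffusion**2 * model_output
#     prev_sample = prev_sample_mean + diffusion * noise
# i.e. the score pushes the mean, and noise of matching scale is re-injected,
# which is the ancestral-sampling analogy mentioned in the comments.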
| 676 | 0 |
from __future__ import annotations

from collections import deque
from dataclasses import dataclass


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""
    Build the tree
          1
         / \
        2   3
       / \
      4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left, right."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left, right, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left, root, right."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree, i.e. its number of levels."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[int]:
    """Breadth-first traversal using a queue."""
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """Collect the values of a single level, left to right."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """Collect the values of a single level, right to left."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[list[int]]:
    """
    ZigZag traversal: alternate left-to-right and right-to-left per level.
    """
    if root is None:
        return []

    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
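# Expected results for the sample tree built by make_tree(), derived by hand
# from the definitions above:
#     level_order(tree) -> [1, 2, 3, 4, 5]
#     zigzag(tree)      -> [[1], [3, 2], [4, 5]]
if __name__ == "__main__":
    sample_tree = make_tree()
    assert level_order(sample_tree) == [1, 2, 3, 4, 5]
    assert zigzag(sample_tree) == [[1], [3, 2], [4, 5]]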
| 61 |
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Play FizzBuzz: starting at `number`, append "Fizz" for multiples of 3,
    "Buzz" for multiples of 5, and the number itself otherwise, up to `iterations`.
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
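# Example of the exact string the function builds (note the trailing space
# appended after every step):
if __name__ == "__main__":
    assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "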
| 676 | 0 |
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 62 |
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Distribute `num_shards` shards as evenly as possible over at most `max_num_jobs` groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into one dict per job, slicing every data-source list."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the per-job lists back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle every data-source list, using the same permutation for lists of the same length."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
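# Worked example of _distribute_shards, derived by hand from the formula
# above: 10 shards over at most 3 jobs yields groups of sizes 4, 3, 3.
#     group 0: 10 // 3 + (0 < 10 % 3) = 3 + 1  ->  range(0, 4)
#     group 1: 3 + 0                           ->  range(4, 7)
#     group 2: 3 + 0                           ->  range(7, 10)
if __name__ == "__main__":
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]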
| 676 | 0 |
def apply_table(inp, table):
    """
    Apply a permutation table: output position i takes character table[i]
    (1-indexed) of the input.
    """
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 63 |
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
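# Hedged usage sketch (paths are assumptions for illustration):
#
#     python binarized_data.py \
#         --file_path data/dump.txt \
#         --tokenizer_type bert \
#         --tokenizer_name bert-base-uncased \
#         --dump_file data/binarized_text
#
# This writes `data/binarized_text.bert-base-uncased.pickle`, a pickled,
# shuffled list of per-line token-id arrays.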
| 676 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )

    @classmethod
    def pip_install(cls):
        return f'`pip install {cls.pip_package or cls.name}`'


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'{len(available_backends)} hyperparameter search backends available. Using {name} as the default.' )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
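# A small usage sketch, assuming the HPSearchBackend enum imported above: resolve a
# backend explicitly instead of relying on default_hp_search_backend().
backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]
backend_cls().ensure_available()  # raises with a pip hint if optuna is not installed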
| 64 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] ='mvp'
lowercase : List[str] =['past_key_values']
lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =classifier_dropout
lowerCamelCase_ =use_cache
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =use_prompt
lowerCamelCase_ =prompt_length
lowerCamelCase_ =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
lowerCamelCase_ =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
| 676 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCAmelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : str = torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
UpperCAmelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : Optional[int] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase__ : Any = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
'''simple docstring'''
if "metadata" in layer:
UpperCAmelCase__ : int = layer.split("""metadata""" )
UpperCAmelCase__ : str = """""".join(split_layer[0] )[:-1]
UpperCAmelCase__ : Any = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
UpperCAmelCase__ : int = layer.split("""kvstore""" )
UpperCAmelCase__ : Dict = """""".join(split_layer[0] )[:-1]
UpperCAmelCase__ : Optional[Any] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
UpperCAmelCase__ : Optional[Any] = layer.split("""/""" )
UpperCAmelCase__ : List[Any] = """/""".join(split_layer[:-1] )
UpperCAmelCase__ : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCAmelCase__ : Union[str, Any] = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
UpperCAmelCase__ : Optional[int] = """file"""
else:
UpperCAmelCase__ : List[Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    '''simple docstring'''
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name = WEIGHTS_NAME):
'''simple docstring'''
UpperCAmelCase__ : Dict = convert_file_size_to_int(__UpperCamelCase )
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
UpperCAmelCase__ : Any = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
UpperCAmelCase__ : List[str] = flatten_dict(__UpperCamelCase , sep="""/""" )
UpperCAmelCase__ : List[str] = {}
for layer in checkpoint_info.keys():
curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
layer , checkpoint_info , switch_checkpoint_path )
if curr_real_layer_name in all_layers:
UpperCAmelCase__ : str = content
else:
UpperCAmelCase__ : Optional[int] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCAmelCase__ : int = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCAmelCase__ : Union[str, Any] = torch.tensor(__UpperCamelCase )
UpperCAmelCase__ : Any = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCAmelCase__ , UpperCAmelCase__ : int = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __UpperCamelCase )
UpperCAmelCase__ : Any = """/""".join(__UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCAmelCase__ : Union[str, Any] = os.path.join(
__UpperCamelCase , weights_name.replace(""".bin""" , F"-{len(__UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Any = raw_weights.to(getattr(__UpperCamelCase , __UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCAmelCase__ : Union[str, Any] = os.path.join(__UpperCamelCase , weights_name.replace(""".bin""" , F"-{len(__UpperCamelCase )+1:05d}-of-???.bin" ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCAmelCase__ : Optional[int] = {}
UpperCAmelCase__ : List[str] = {}
for idx, shard in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Dict = weights_name.replace(
""".bin""" , F"-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin" ) # len(sharded_state_dicts):05d}
UpperCAmelCase__ : int = os.path.join(__UpperCamelCase , weights_name.replace(""".bin""" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = shard
for key in shard:
UpperCAmelCase__ : Any = shard_file
# Add the metadata
UpperCAmelCase__ : Optional[int] = {"""total_size""": total_size}
UpperCAmelCase__ : Tuple = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase__ : Union[str, Any] = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + """\n"""
f.write(__UpperCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__UpperCAmelCase = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = T5Tokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
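# A minimal sketch of consuming the shard index written above, assuming the standard
# WEIGHTS_INDEX_NAME ("pytorch_model.bin.index.json") layout; nothing here is from the script.
import json

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)
print(index["metadata"]["total_size"])       # summed byte size of all shards
first_key = next(iter(index["weight_map"]))  # maps parameter name -> shard file name
print(first_key, "->", index["weight_map"][first_key])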
| 65 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {"""vocab_file""": """spiece.model"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[int] =[]
lowercase : str =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 676 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
UpperCamelCase = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
UpperCamelCase = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
UpperCamelCase = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
UpperCamelCase = {
"num_train_timesteps": 40,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
UpperCamelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
UpperCamelCase = {
"num_train_timesteps": 151,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
_lowercase : List[str] = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
_lowercase : int = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
_lowercase : Optional[int] = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
_lowercase : Optional[Any] = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
_lowercase : Optional[Any] = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
_lowercase : Optional[int] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
_lowercase : Union[str, Any] = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
_lowercase : List[str] = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
_lowercase : Optional[int] = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
_lowercase : Optional[Any] = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
_lowercase : Union[str, Any] = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
_lowercase : int = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> List[str]:
_lowercase , _lowercase , _lowercase : Union[str, Any] = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
_lowercase , _lowercase , _lowercase : Union[str, Any] = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
_lowercase : str = checkpoint[F"""{old_prefix}.norm.weight"""]
_lowercase : Tuple = checkpoint[F"""{old_prefix}.norm.bias"""]
_lowercase : int = weight_q.squeeze(-1 ).squeeze(-1 )
_lowercase : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
_lowercase : int = weight_k.squeeze(-1 ).squeeze(-1 )
_lowercase : Any = bias_k.squeeze(-1 ).squeeze(-1 )
_lowercase : List[str] = weight_v.squeeze(-1 ).squeeze(-1 )
_lowercase : List[str] = bias_v.squeeze(-1 ).squeeze(-1 )
_lowercase : Tuple = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
_lowercase : Dict = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
_lowercase : str = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
_lowercase : str = {}
_lowercase : str = checkpoint['time_embed.0.weight']
_lowercase : Tuple = checkpoint['time_embed.0.bias']
_lowercase : Optional[Any] = checkpoint['time_embed.2.weight']
_lowercase : int = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
_lowercase : Dict = checkpoint['label_emb.weight']
_lowercase : Tuple = checkpoint['input_blocks.0.0.weight']
_lowercase : Optional[Any] = checkpoint['input_blocks.0.0.bias']
_lowercase : Union[str, Any] = unet_config['down_block_types']
_lowercase : Tuple = unet_config['layers_per_block']
_lowercase : Any = unet_config['attention_head_dim']
_lowercase : List[Any] = unet_config['block_out_channels']
_lowercase : Optional[Any] = 1
_lowercase : List[str] = channels_list[0]
for i, layer_type in enumerate(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = channels_list[i]
_lowercase : Tuple = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(SCREAMING_SNAKE_CASE ):
_lowercase : str = F"""down_blocks.{i}.resnets.{j}"""
_lowercase : Dict = F"""input_blocks.{current_layer}.0"""
_lowercase : int = True if j == 0 and downsample_block_has_skip else False
_lowercase : Optional[int] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_skip=SCREAMING_SNAKE_CASE )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = F"""down_blocks.{i}.resnets.{j}"""
_lowercase : Union[str, Any] = F"""input_blocks.{current_layer}.0"""
_lowercase : List[Any] = True if j == 0 and downsample_block_has_skip else False
_lowercase : Union[str, Any] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_skip=SCREAMING_SNAKE_CASE )
_lowercase : Dict = F"""down_blocks.{i}.attentions.{j}"""
_lowercase : int = F"""input_blocks.{current_layer}.1"""
_lowercase : Any = convert_attention(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE ) - 1:
_lowercase : int = F"""down_blocks.{i}.downsamplers.0"""
_lowercase : List[Any] = F"""input_blocks.{current_layer}.0"""
_lowercase : Tuple = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
current_layer += 1
_lowercase : List[Any] = current_channels
# hardcoded the mid-block for now
_lowercase : List[Any] = 'mid_block.resnets.0'
_lowercase : Union[str, Any] = 'middle_block.0'
_lowercase : Optional[int] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Tuple = 'mid_block.attentions.0'
_lowercase : Any = 'middle_block.1'
_lowercase : str = convert_attention(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = 'mid_block.resnets.1'
_lowercase : Optional[Any] = 'middle_block.2'
_lowercase : List[Any] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = 0
_lowercase : List[str] = unet_config['up_block_types']
for i, layer_type in enumerate(SCREAMING_SNAKE_CASE ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowercase : Tuple = F"""up_blocks.{i}.resnets.{j}"""
_lowercase : int = F"""output_blocks.{current_layer}.0"""
_lowercase : str = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_skip=SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE ) - 1:
_lowercase : Dict = F"""up_blocks.{i}.upsamplers.0"""
_lowercase : Optional[Any] = F"""output_blocks.{current_layer-1}.1"""
_lowercase : Dict = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowercase : Tuple = F"""up_blocks.{i}.resnets.{j}"""
_lowercase : List[str] = F"""output_blocks.{current_layer}.0"""
_lowercase : Optional[Any] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_skip=SCREAMING_SNAKE_CASE )
_lowercase : str = F"""up_blocks.{i}.attentions.{j}"""
_lowercase : Dict = F"""output_blocks.{current_layer}.1"""
_lowercase : Optional[Any] = convert_attention(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE ) - 1:
_lowercase : int = F"""up_blocks.{i}.upsamplers.0"""
_lowercase : str = F"""output_blocks.{current_layer-1}.2"""
_lowercase : List[Any] = convert_resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = checkpoint['out.0.weight']
_lowercase : Optional[Any] = checkpoint['out.0.bias']
_lowercase : Dict = checkpoint['out.2.weight']
_lowercase : Dict = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
UpperCamelCase = parser.parse_args()
UpperCamelCase = strabool(args.class_cond)
UpperCamelCase = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
UpperCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
UpperCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
UpperCamelCase = None
UpperCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
UpperCamelCase = UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
UpperCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
UpperCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
UpperCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
UpperCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
UpperCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
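# A minimal inference sketch with the converted pipeline, assuming diffusers'
# ConsistencyModelPipeline API; the checkpoint path is illustrative.
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("path/to/dump")  # hypothetical path
image = pipe(num_inference_steps=1).images[0]  # consistency models can sample in a single step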
| 66 |
'''simple docstring'''
from collections.abc import Sequence
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(__snake_case ) )
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
lowerCamelCase_ =0.0
for coeff in reversed(__snake_case ):  # fold from the leading coefficient downwards
lowerCamelCase_ =result * x + coeff  # Horner step: one multiply-add per coefficient
return result
if __name__ == "__main__":
a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
a_ : Tuple = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
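# The two evaluations agree up to floating-point rounding (their summation orders
# differ); for this poly and x both print approximately 79800.0.
import math

assert math.isclose(evaluate_poly(poly, x), horner(poly, x))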
| 676 | 0 |
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
_lowercase = []
_lowercase = 2
_lowercase = int(math.sqrt(snake_case__ ) ) # Size of every segment
_lowercase = [True] * (end + 1)
_lowercase = []
while start <= end:
if temp[start] is True:
in_prime.append(snake_case__ )
for i in range(start * start , end + 1 , snake_case__ ):
_lowercase = False
start += 1
prime += in_prime
_lowercase = end + 1
_lowercase = min(2 * end , snake_case__ )
while low <= n:
_lowercase = [True] * (high - low + 1)
for each in in_prime:
_lowercase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(snake_case__ , high + 1 , snake_case__ ):
_lowercase = False
for j in range(len(snake_case__ ) ):
if temp[j] is True:
prime.append(j + low )
_lowercase = high + 1
_lowercase = min(high + end , snake_case__ )
return prime
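# Quick check (illustrative): there are 25 primes below 100. The segmented pass above
# only ever keeps O(sqrt(n)) booleans resident at a time.
assert len(sieve(100)) == 25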
print(sieve(1_0**6))
| 67 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =['image_processor', 'tokenizer']
lowercase : str ='CLIPImageProcessor'
lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if text is not None and images is not None:
lowerCamelCase_ =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
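# Illustrative usage, assuming this class mirrors transformers' AltCLIPProcessor; the
# checkpoint id is the public AltCLIP release and the image path is a placeholder.
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
img = Image.open("cat.png")
inputs = processor(text=["a photo of a cat"], images=img, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values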
| 676 | 0 |
from __future__ import annotations
__A = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowercase__ ( A_: list[list[int]] , A_: list[int] , A_: list[int] , A_: int , A_: list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__UpperCAmelCase =[
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the reference grid
__UpperCAmelCase =1
__UpperCAmelCase =[
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the action grid
__UpperCAmelCase =init[0]
__UpperCAmelCase =init[1]
__UpperCAmelCase =0
__UpperCAmelCase =g + heuristic[x][y] # f = g + h: cost so far plus the heuristic estimate to the goal
__UpperCAmelCase =[[f, g, x, y]]
__UpperCAmelCase =False # flag that is set when search is complete
__UpperCAmelCase =False # flag set if we can't find expand
while not found and not resign:
if len(A_ ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__UpperCAmelCase =cell.pop()
__UpperCAmelCase =next_cell[2]
__UpperCAmelCase =next_cell[3]
__UpperCAmelCase =next_cell[1]
if x == goal[0] and y == goal[1]:
__UpperCAmelCase =True
else:
for i in range(len(A_ ) ): # to try out different valid actions
__UpperCAmelCase =x + DIRECTIONS[i][0]
__UpperCAmelCase =y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(A_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__UpperCAmelCase =g + cost
__UpperCAmelCase =ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__UpperCAmelCase =1
__UpperCAmelCase =i
__UpperCAmelCase =[]
__UpperCAmelCase =goal[0]
__UpperCAmelCase =goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__UpperCAmelCase =x - DIRECTIONS[action[x][y]][0]
__UpperCAmelCase =y - DIRECTIONS[action[x][y]][1]
__UpperCAmelCase =xa
__UpperCAmelCase =ya
invpath.append([x, y] )
__UpperCAmelCase =[]
for i in range(len(A_ ) ):
path.append(invpath[len(A_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
__A = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__A = [0, 0]
# all coordinates are given in format [y,x]
__A = [len(grid) - 1, len(grid[0]) - 1]
__A = 1
# the cost map which pushes the path closer to the goal
__A = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__A = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__A = 99
__A , __A = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
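# The heuristic is the Manhattan distance to the goal (the 99 penalty only sits on
# obstacle cells, which are never expanded), so it never overestimates the remaining
# cost on this unit-cost 4-connected grid and the reconstructed path is a shortest one.
print(f"path length: {len(path) - 1} steps")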
| 68 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
requires_backends(self, '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={}
if prompt is not None:
lowerCamelCase_ =prompt
if generate_kwargs is not None:
lowerCamelCase_ =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCamelCase_ ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
lowerCamelCase_ =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
lowerCamelCase_ =self.model.config.model_type
if model_type == "git":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase_ =None
return model_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
lowerCamelCase_ =None
if generate_kwargs is None:
lowerCamelCase_ ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for output_ids in model_outputs:
lowerCamelCase_ ={
'''generated_text''': self.tokenizer.decode(
lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
}
records.append(lowerCAmelCase )
return records
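# Illustrative end-to-end usage, assuming the standard transformers pipeline factory and
# a public image-captioning checkpoint; the model id and file name are examples only.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("cat.png", max_new_tokens=20))  # [{'generated_text': '...'}]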
| 676 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.nn.Linear(2 , 4 )
__snake_case = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case = torch.optim.lr_scheduler.OneCycleLR(_UpperCAmelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> List[Any]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Union[str, Any]:
__snake_case = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@require_cuda
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case = Accelerator(cpu=a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case = GradientState()
assert state.num_steps == 1
__snake_case = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case = False
assert state.sync_gradients is False
GradientState._reset_state()
def A ( self : int ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def A ( self : int ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def A ( self : Tuple ):
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ : Tuple , **a_ : Optional[int] ):
pass
with patch("torch.cuda.set_device" , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
__snake_case = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1e-3 )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case = get_signature(a_ )
# saving hook
def save_config(a_ : int , a_ : str , a_ : List[str] ):
__snake_case = {"class_name": models[0].__class__.__name__}
with open(os.path.join(a_ , "data.json" ) , "w" ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ : int , a_ : int ):
with open(os.path.join(a_ , "data.json" ) , "r" ) as f:
__snake_case = json.load(a_ )
__snake_case = config["class_name"]
__snake_case = accelerator.register_save_state_pre_hook(a_ )
__snake_case = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
__snake_case = "random"
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
__snake_case = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def A ( self : int ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a_ , "_is_accelerate_prepared" , a_ ) , a_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def A ( self : Union[str, Any] ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a_ , device_map={"": 0} , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(a_ )
@slow
@require_bnb
def A ( self : List[Any] ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = Accelerator()
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
__snake_case = infer_auto_device_map(a_ )
__snake_case = "cpu"
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def A ( self : Optional[Any] ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
__snake_case = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
__snake_case = infer_auto_device_map(a_ )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a_ , device_map=a_ , )
__snake_case = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def A ( self : Tuple ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
__snake_case = infer_auto_device_map(a_ )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a_ , device_map=a_ , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(a_ )
@require_cuda
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = torch.nn.Linear(10 , 10 )
__snake_case = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case = Accelerator(cpu=a_ )
__snake_case = accelerator.prepare(a_ )
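# A condensed sketch of the save/load round trip the tests above exercise; the
# checkpoint directory is illustrative and everything else is plain accelerate API.
accelerator = Accelerator()
model = accelerator.prepare(torch.nn.Linear(2, 4))
accelerator.save_state("ckpt")  # writes model, optimizer and RNG state under ./ckpt
accelerator.load_state("ckpt")  # restores them in place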
| 69 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str:
"""simple docstring"""
# Initialise PyTorch model
lowerCamelCase_ =BertConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ =BertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
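# Programmatic equivalent of the CLI above; all three paths are illustrative.
convert_tf_checkpoint_to_pytorch(
    "./bert_model.ckpt", "./bert_config.json", "./pytorch_model.bin"
)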
| 676 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCamelCase : Optional[Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
lowerCamelCase : Union[str, Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
lowerCamelCase : Optional[int] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def a__ ( self : Any , A_ : Optional[Any] , A_ : List[Any] , A_ : List[Any]=None ) -> Tuple:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(A_ , A_ , sample_weight=A_ ) ),
}
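# For the binary case the metric reduces to a closed form over confusion-matrix counts;
# a direct reference implementation (not part of the metric class above):
import math

def mcc_binary(tp: int, tn: int, fp: int, fn: int) -> float:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); 0.0 when undefined.
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom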
| 70 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='altclip_text_model'
def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =project_dim
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='altclip_vision_model'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =num_channels
lowerCamelCase_ =patch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
@classmethod
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
lowerCamelCase_ =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in
        # most cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
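

# Illustrative usage sketch (not part of the original module; values are arbitrary):
# compose a full AltCLIP config from its two sub-configs and round-trip it through
# `to_dict`, which re-serializes the nested configs.
if __name__ == "__main__":
    text_cfg = AltCLIPTextConfig(hidden_size=1_024, project_dim=768)
    vision_cfg = AltCLIPVisionConfig(hidden_size=768, projection_dim=512)
    cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=768)
    assert cfg.to_dict()["model_type"] == "altclip"
    assert cfg.to_dict()["text_config"]["hidden_size"] == 1_024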
| 676 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
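
# Worked example (illustrative input, not from the original script): a results file like
#
#   {"benchmarks/sort.json": {"runtime_s": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# renders (inside a collapsible <details> block) roughly as:
#
#   ### Benchmark: sort.json
#   | metric | runtime_s |
#   |--------|---|
#   | new / old (diff) | 1.200000 / 1.500000 (-0.300000) |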
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 71 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_case_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def lowercase__ ( self ):
"""simple docstring"""
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
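

# Standalone sketch of the trace/save/load round trip exercised above (illustrative;
# the tiny module and tensor shapes are made up, not taken from the test suite):
if __name__ == "__main__":
    import torch

    class _TinyModule(torch.nn.Module):
        def forward(self, input_ids, attention_mask):
            return input_ids * attention_mask

    traced = torch.jit.trace(_TinyModule(), (torch.ones(1, 3), torch.ones(1, 3)))
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "traced_model.pt")
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path, map_location="cpu")
        loaded(torch.ones(1, 3), torch.ones(1, 3))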
| 676 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str, model_name: str) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 72 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str], pt_tensor: np.ndarray, random_flax_state_dict: Dict[str, jnp.ndarray], model_prefix: str
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
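
# Worked example of rename_key_and_reshape_tensor (illustrative key names): a PyTorch conv
# weight at ("encoder", "conv", "weight") with shape (out_ch, in_ch, kh, kw) is renamed to
# ("encoder", "conv", "kernel") and transposed to (kh, kw, in_ch, out_ch), matching Flax's
# HWIO convolution layout; a 2-dim linear "weight" is renamed to "kernel" and transposed.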
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
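
# The conversion above operates on "flattened" parameter trees keyed by tuples. A minimal
# standalone sketch of the flax.traverse_util helpers it relies on (illustrative values):
if __name__ == "__main__":
    nested = {"encoder": {"layer_0": {"kernel": 1}}}
    flat = flatten_dict(nested)  # {("encoder", "layer_0", "kernel"): 1}
    assert unflatten_dict(flat) == nested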
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
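
# Worked example of the `weight_norm` key rewrite above (illustrative key): for a PyTorch
# key "conv.parametrizations.weight.original0", key_components[-3::2] is
# ["parametrizations", "original0"], so the key is rewritten to "conv.weight_g"
# (and "...original1" to "conv.weight_v") before the Flax tensor is matched against it.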
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50_267, entity_vocab_size=500_000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
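

# Illustrative usage sketch (not part of the original module): LUKE keeps separate word and
# entity vocabularies, and its entity embeddings may be smaller than the hidden size.
if __name__ == "__main__":
    config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=256, hidden_size=768)
    assert config.entity_emb_size <= config.hidden_size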
| 73 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending the leftover tail of the longer one."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        # Flip deprecated `no_*` flags into their positive counterparts before the
        # dataclass `__init__` consumes them.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
__SCREAMING_SNAKE_CASE : str = None
if self.tpu:
try:
if self.tpu_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__SCREAMING_SNAKE_CASE : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
return tpu
@cached_property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__SCREAMING_SNAKE_CASE : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
__SCREAMING_SNAKE_CASE : List[Any] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return self.n_gpu > 0
| 74 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
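
# Note on the lazy-import pattern above (illustrative): the module body only records names
# in `_import_structure`; `_LazyModule` resolves them on first attribute access, so a plain
# `import transformers.models.timm_backbone` stays cheap, while attribute access such as
# `module.TimmBackbone` triggers the real import (and only raises then if torch is missing).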
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Any = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : int , _A : int , _A : Union[str, Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFEsmModel(config=_A )
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
UpperCAmelCase__ : str = model(_A )
UpperCAmelCase__ : str = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(_A )
UpperCAmelCase__ : Optional[int] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , _A : str , _A : Tuple , _A : Any , _A : Any , _A : Union[str, Any] , _A : int , _A : Optional[Any] , _A : str , ):
'''simple docstring'''
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Any = TFEsmModel(config=_A )
UpperCAmelCase__ : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
UpperCAmelCase__ : List[str] = model(_A )
UpperCAmelCase__ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase__ : List[str] = model(_A , encoder_hidden_states=_A )
# Also check the case where encoder outputs are not passed
UpperCAmelCase__ : str = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , _A : int , _A : Tuple , _A : List[Any] , _A : Dict , _A : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFEsmForMaskedLM(config=_A )
UpperCAmelCase__ : str = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : int , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : str , _A : Union[str, Any] , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.num_labels
UpperCAmelCase__ : Any = TFEsmForTokenClassification(config=_A )
UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def lowercase_ ( self : str ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = TFEsmModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(_A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase__ : List[Any] = model.get_bias()
assert isinstance(_A , _A )
for k, v in name.items():
assert isinstance(_A , tf.Variable )
else:
UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCAmelCase__ : Tuple = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
UpperCAmelCase__ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase__ : Any = model(_A )[0]
# compare the actual values for a slice.
UpperCAmelCase__ : Optional[int] = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
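# For reference, a standalone usage sketch of the checkpoint the tests above
# exercise; the tokenizer call and the mask-filling logic are my own
# assumptions, not part of the test suite.
import tensorflow as tf
from transformers import AutoTokenizer, TFEsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
mlm_model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = tokenizer("MKTAYIAKQR<mask>ISFVKSHFSRQ", return_tensors="tf")
logits = mlm_model(**inputs).logits  # shape (batch, seq_len, vocab_size=33)
mask_pos = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
predicted_token = tokenizer.decode([int(tf.argmax(logits[0, mask_pos]))])
print(predicted_token)  # most likely residue at the masked position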
| 75 |
'''simple docstring'''
import functools
def a_ ( __snake_case : str , __snake_case : str ) -> int:
"""simple docstring"""
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =len(__snake_case )
@functools.cache
def min_distance(__snake_case : int , __snake_case : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
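# A de-obfuscated sketch of the memoized edit-distance routine above: the
# placeholder names (worda, indexa, len_worda) each collapse two distinct
# variables, so this restores the intended two-word, two-index recursion.
import functools

def min_edit_distance(word1: str, word2: str) -> int:
    len1, len2 = len(word1), len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        if index1 >= len1:  # word1 exhausted: delete the rest of word2
            return len2 - index2
        if index2 >= len2:  # word2 exhausted: delete the rest of word1
            return len1 - index1
        diff = int(word1[index1] != word2[index2])  # 1 if letters differ
        return min(
            1 + min_distance(index1 + 1, index2),         # delete from word1
            1 + min_distance(index1, index2 + 1),         # delete from word2
            diff + min_distance(index1 + 1, index2 + 1),  # substitute/match
        )

    return min_distance(0, 0)

assert min_edit_distance("kitten", "sitting") == 3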
| 676 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase_ :
UpperCamelCase =None
def _lowerCamelCase ( self ) -> str:
__lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase : List[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : str = os.path.join(UpperCamelCase_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase_ )
__lowercase : Union[str, Any] = self.feature_extraction_class.from_json_file(UpperCamelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Union[str, Any] = feat_extract_first.save_pretrained(UpperCamelCase_ )[0]
check_json_file_has_correct_format(UpperCamelCase_ )
__lowercase : Optional[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Tuple = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase_ )
| 76 |
'''simple docstring'''
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =F'''Input value of [number={number}] must be an integer'''
raise TypeError(__snake_case )
if number < 0:
return False
lowerCamelCase_ =number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
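# A cleaned-up reading of the check above: it tests whether a number is
# automorphic, i.e. whether its square ends in the number itself
# (5 -> 25, 76 -> 5776). Names are mine, chosen for clarity.
def is_automorphic(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:  # compare trailing digits
            return False
        number //= 10
        square //= 10
    return True

assert is_automorphic(76) and not is_automorphic(7)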
| 676 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Union[str, Any] = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
__UpperCAmelCase : List[Any] = len(UpperCamelCase ) if (len(UpperCamelCase ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(UpperCamelCase ) , "Postfix".center(UpperCamelCase ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase ) == 0:
stack.append(UpperCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(UpperCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCamelCase ) # push x to stack
print(
x.center(8 ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=" | " , ) # Output in tabular format
while len(UpperCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=" | " , ) # Output in tabular format
return "".join(UpperCamelCase ) # return Postfix as str
def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCamelCase ) ):
if infix[i] == "(":
__UpperCAmelCase : Optional[int] = ")" # change "(" to ")"
elif infix[i] == ")":
__UpperCAmelCase : int = "(" # change ")" to "("
return (infix_2_postfix("".join(UpperCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A = input("""\nEnter an Infix Equation = """) # Input an Infix equation
A = """""".join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 77 |
'''simple docstring'''
from __future__ import annotations
a_ : int = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a_ ( __snake_case : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__snake_case ):
lowerCamelCase_, lowerCamelCase_ =location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
lowerCamelCase_ =digit
if sudoku(__snake_case ) is not None:
return grid
lowerCamelCase_ =0
return None
def a_ ( __snake_case : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__snake_case , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] ={
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
SCREAMING_SNAKE_CASE_: Optional[Any] ={value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCAmelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = "Morse code here!"
print(snake_case_ )
UpperCAmelCase_ = encrypt(snake_case_ )
print(snake_case_ )
UpperCAmelCase_ = decrypt(snake_case_ )
print(snake_case_ )
if __name__ == "__main__":
main()
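# A quick round-trip through the table above, assuming the encrypt/decrypt
# helpers as invoked in main():
encoded = encrypt("SOS")  # -> "... --- ..."
assert decrypt(encoded) == "SOS"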
| 78 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Tuple = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Union[str, Any] ='informer'
lowercase : Union[str, Any] ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =prediction_length
lowerCamelCase_ =context_length or prediction_length
lowerCamelCase_ =distribution_output
lowerCamelCase_ =loss
lowerCamelCase_ =input_size
lowerCamelCase_ =num_time_features
lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ =scaling
lowerCamelCase_ =num_dynamic_real_features
lowerCamelCase_ =num_static_real_features
lowerCamelCase_ =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase_ =cardinality
else:
lowerCamelCase_ =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase_ =embedding_dimension
else:
lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ =num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =use_cache
# Informer
lowerCamelCase_ =attention_type
lowerCamelCase_ =sampling_factor
lowerCamelCase_ =distil
super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 676 | 0 |
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase_ ( __lowerCamelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = self.values[key]
def __UpperCAmelCase ( self ):
return (
sum(self.charge_factor - len(_lowerCAmelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_lowerCAmelCase ) == 0
):
return key
return super()._collision_resolution(_lowerCAmelCase , _lowerCAmelCase )
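# The class above layers separate chaining onto a base HashTable by keeping a
# deque per slot. A self-contained sketch of the same idea (no base class;
# names and sizing are my own):
from collections import deque

class ChainedTable:
    def __init__(self, size: int = 8) -> None:
        self.size = size
        self.slots = [None] * size

    def insert(self, key: int, value) -> None:
        index = hash(key) % self.size
        if self.slots[index] is None:
            self.slots[index] = deque()
        self.slots[index].appendleft(value)  # newest value first, as above

    def chain(self, key: int):
        return self.slots[hash(key) % self.size]

table = ChainedTable()
table.insert(1, "a")
table.insert(9, "b")  # 9 % 8 == 1, so it chains into the same slot
assert list(table.chain(1)) == ["b", "a"]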
| 79 |
'''simple docstring'''
from __future__ import annotations
def a_ ( __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =[True] * limit
lowerCamelCase_ =False
lowerCamelCase_ =False
lowerCamelCase_ =True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ =i * 2
while index < limit:
lowerCamelCase_ =False
lowerCamelCase_ =index + i
lowerCamelCase_ =[2]
for i in range(3 , __snake_case , 2 ):
if is_prime[i]:
primes.append(__snake_case )
return primes
def a_ ( __snake_case : int = 100_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =prime_sieve(__snake_case )
lowerCamelCase_ =0
lowerCamelCase_ =0
for i in range(len(__snake_case ) ):
for j in range(i + length , len(__snake_case ) ):
lowerCamelCase_ =sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ =j - i
lowerCamelCase_ =sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 676 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
__lowercase = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(_lowerCAmelCase )
from datasets import load_dataset
__lowercase = load_dataset("""nielsr/rvlcdip-demo""" )
__lowercase = dataset["""train"""][0]["""image"""].convert("""RGB""" )
__lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
__lowercase = outputs.logits
__lowercase = torch.Size((1, 16) )
self.assertEqual(logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 80 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size, lowerCAmelCase ):
lowerCamelCase_ =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase_ =self.scheduler.step(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample
lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
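# A hedged usage sketch for the pipeline above; the checkpoint name is an
# assumption (any unconditional DDPM/DDIM-style unet checkpoint would do).
import torch
from diffusers import DDIMPipeline

pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipeline(
    batch_size=1,
    generator=torch.Generator().manual_seed(0),
    eta=0.0,  # eta = 0.0 makes DDIM sampling deterministic
    num_inference_steps=50,
).images[0]
image.save("ddim_sample.png")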
| 676 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=3 , lowerCamelCase : Dict=32 , lowerCamelCase : List[Any]=3 , lowerCamelCase : List[str]=10 , lowerCamelCase : List[str]=[10, 20, 30, 40] , lowerCamelCase : Optional[Any]=[1, 1, 2, 1] , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Tuple=True , lowerCamelCase : int="relu" , lowerCamelCase : int=3 , lowerCamelCase : str=None , ) -> Optional[int]:
__snake_case : str = parent
__snake_case : List[Any] = batch_size
__snake_case : int = image_size
__snake_case : Optional[Any] = num_channels
__snake_case : Any = embeddings_size
__snake_case : Dict = hidden_sizes
__snake_case : Tuple = depths
__snake_case : List[str] = is_training
__snake_case : int = use_labels
__snake_case : str = hidden_act
__snake_case : Tuple = num_labels
__snake_case : Optional[int] = scope
__snake_case : int = len(lowerCamelCase )
def __snake_case ( self : Tuple ) -> Optional[int]:
__snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
if self.use_labels:
__snake_case : int = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Any = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : List[Any] ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __snake_case ( self : str , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ) -> Tuple:
__snake_case : Dict = TFResNetModel(config=lowerCamelCase )
__snake_case : Dict = model(lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Optional[int]:
__snake_case : Optional[int] = self.num_labels
__snake_case : List[Any] = TFResNetForImageClassification(lowerCamelCase )
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : int ) -> Dict:
__snake_case : str = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase : Optional[int] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : List[str] = False
def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case : Any = TFResNetModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __snake_case ( self : List[Any] ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : List[str] ) -> Optional[int]:
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def __snake_case ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def __snake_case ( self : Tuple ) -> List[Any]:
pass
def __snake_case ( self : Optional[Any] ) -> Tuple:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(lowerCamelCase )
__snake_case : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : List[Any] ) -> str:
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Tuple ) -> Optional[int]:
def check_hidden_states_output(lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ):
__snake_case : List[Any] = model_class(lowerCamelCase )
__snake_case : Tuple = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : Tuple = layer_type
__snake_case : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : List[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Any ) -> int:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def __snake_case ( self : List[str] ) -> Optional[int]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TFResNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase_ ( ):
__snake_case : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : Dict ) -> str:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case ( self : Optional[int] ) -> Tuple:
__snake_case : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__snake_case : List[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : str = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
__snake_case : Dict = model(**lowerCamelCase )
# verify the logits
__snake_case : Union[str, Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : List[str] = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase , atol=1E-4 ) )
| 81 |
'''simple docstring'''
from maths.prime_check import is_prime
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =F'''Input value of [number={number}] must be an integer'''
raise TypeError(__snake_case )
if is_prime(__snake_case ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
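# A self-contained sketch of the twin-prime helper above (the names
# twin_prime/is_prime are mine; upstream, is_prime comes from
# maths.prime_check): it returns number + 2 when (number, number + 2) are
# both prime, and -1 otherwise.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1

assert twin_prime(5) == 7 and twin_prime(7) == -1  # 9 = 3 * 3 is not prime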
| 676 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : torch.FloatTensor
lowercase : torch.FloatTensor
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : Tuple =1
@register_to_config
def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
"""simple docstring"""
lowerCamelCase_ =sigma_max
# setable values
lowerCamelCase_ =None
self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCamelCase_ =timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCamelCase_ =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCamelCase_ =diffusion.unsqueeze(-1 )
lowerCamelCase_ =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCamelCase_ =randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCamelCase_ =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCamelCase_ =step_size.unsqueeze(-1 )
lowerCamelCase_ =sample + step_size * model_output
lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =timesteps.to(original_samples.device )
lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCamelCase_ =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
lowerCamelCase_ =noise + original_samples
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 676 | 0 |
"""simple docstring"""
from typing import List
import numpy as np
def snake_case_ ( A_ : dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {key: len(A_ ) for key, value in gen_kwargs.items() if isinstance(A_, A_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
_lowerCamelCase : List[str] = max(lists_lengths.values(), default=0 )
return max(1, A_ )
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : int = []
for group_idx in range(A_ ):
_lowerCamelCase : Tuple = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCamelCase : Any = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCamelCase : List[str] = range(A_, start + num_shards_to_add )
shards_indices_per_group.append(A_ )
return shards_indices_per_group
def snake_case_ ( A_ : dict, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = _number_of_shards_in_gen_kwargs(A_ )
if num_shards == 1:
return [dict(A_ )]
else:
_lowerCamelCase : Union[str, Any] = _distribute_shards(num_shards=A_, max_num_jobs=A_ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(A_, A_ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(A_ ) )
]
def snake_case_ ( A_ : List[dict] ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], A_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def snake_case_ ( A_ : np.random.Generator, A_ : dict ):
'''simple docstring'''
_lowerCamelCase : List[str] = {len(A_ ) for value in gen_kwargs.values() if isinstance(A_, A_ )}
_lowerCamelCase : Tuple = {}
for size in list_sizes:
_lowerCamelCase : str = list(range(A_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCamelCase : Optional[int] = dict(A_ )
for key, value in shuffled_kwargs.items():
if isinstance(A_, A_ ):
_lowerCamelCase : int = [value[i] for i in indices_per_size[len(A_ )]]
return shuffled_kwargs
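# A standalone rendering of the _distribute_shards helper invoked above, plus
# a worked example: 10 shards over 3 jobs yields contiguous groups of sizes
# 4, 3, 3 (the remainder goes to the earliest groups).
def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups: list[range] = []
    for group_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + size))
    return groups

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]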
| 83 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : int ) -> str:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__snake_case , __snake_case ) or not number >= 1:
raise ValueError(
            '''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowerCamelCase_ =''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__snake_case )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
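# Sample output of the routine above for number=1, iterations=15 (note the
# trailing space appended after every entry):
#   "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "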
| 676 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCAmelCase = '''
Human: <<task>>
Assistant: '''
UpperCAmelCase = '''huggingface-tools/default-prompts'''
UpperCAmelCase = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="run" ):
if prompt_or_repo_id is None:
lowercase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __SCREAMING_SNAKE_CASE ) is not None:
return prompt_or_repo_id
lowercase = cached_file(
__SCREAMING_SNAKE_CASE , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
return f.read()
| 84 |
'''simple docstring'''
from typing import List
import numpy as np
def a_ ( __snake_case : dict ) -> int:
"""simple docstring"""
lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
lowerCamelCase_ =max(lists_lengths.values() , default=0 )
return max(1 , __snake_case )
def a_ ( __snake_case : int , __snake_case : int ) -> List[range]:
"""simple docstring"""
lowerCamelCase_ =[]
for group_idx in range(__snake_case ):
lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowerCamelCase_ =range(__snake_case , start + num_shards_to_add )
shards_indices_per_group.append(__snake_case )
return shards_indices_per_group
def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]:
"""simple docstring"""
lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case )
if num_shards == 1:
return [dict(__snake_case )]
else:
lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__snake_case , __snake_case )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__snake_case ) )
]
def a_ ( __snake_case : List[dict] ) -> dict:
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __snake_case )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict:
"""simple docstring"""
lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )}
lowerCamelCase_ ={}
for size in list_sizes:
lowerCamelCase_ =list(range(__snake_case ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowerCamelCase_ =dict(__snake_case )
for key, value in shuffled_kwargs.items():
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]]
return shuffled_kwargs
| 676 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
set_seed(770)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.dirname(os.path.abspath(__file__))
SCREAMING_SNAKE_CASE__ : int = os.path.join(os.path.expanduser("~"), ".cache")
SCREAMING_SNAKE_CASE__ : int = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _a ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]['file_name'] )
def _a ( lowercase__ : Dict , lowercase__ : Dict ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def _a ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Any=False , lowercase__ : int="text" ):
'''simple docstring'''
if model_type == "text":
SCREAMING_SNAKE_CASE__ : List[str] = BarkSemanticModel
SCREAMING_SNAKE_CASE__ : Optional[int] = BarkSemanticConfig
SCREAMING_SNAKE_CASE__ : Dict = BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE__ : Tuple = BarkCoarseModel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BarkCoarseConfig
SCREAMING_SNAKE_CASE__ : List[str] = BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE__ : Tuple = BarkFineModel
SCREAMING_SNAKE_CASE__ : Tuple = BarkFineConfig
SCREAMING_SNAKE_CASE__ : Optional[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE__ : Tuple = f'''{model_type}_small''' if use_small else model_type
SCREAMING_SNAKE_CASE__ : Dict = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['repo_id'] , model_info['file_name'] )
SCREAMING_SNAKE_CASE__ : str = torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE__ : List[Any] = model_args['vocab_size']
SCREAMING_SNAKE_CASE__ : str = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_args.pop('n_head' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_args.pop('n_embd' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_args.pop('n_layer' )
SCREAMING_SNAKE_CASE__ : Dict = ConfigClass(**checkpoint['model_args'] )
SCREAMING_SNAKE_CASE__ : str = ModelClass(config=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = GenerationConfigClass()
SCREAMING_SNAKE_CASE__ : List[str] = model_generation_config
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint['model']
# fixup checkpoint
SCREAMING_SNAKE_CASE__ : List[str] = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE__ : Optional[int] = k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE__ : Dict = state_dict.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : int = set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE__ : Tuple = {k for k in extra_keys if not k.endswith('.attn.bias' )}
SCREAMING_SNAKE_CASE__ : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE__ : Optional[int] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowercase__ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(lowercase__ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(lowercase__ , strict=lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.num_parameters(exclude_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint['best_val_loss'].item()
logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss''' )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def _a ( lowercase__ : Dict , lowercase__ : Tuple=False , lowercase__ : str="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE__ : Dict = 'cpu' # do conversion on cpu
SCREAMING_SNAKE_CASE__ : str = _get_ckpt_path(lowercase__ , use_small=lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
SCREAMING_SNAKE_CASE__ : List[str] = _bark_load_model(lowercase__ , 'cpu' , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
SCREAMING_SNAKE_CASE__ : Any = bark_model['model']
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 5
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE__ : str = bark_model(lowercase__ )[0]
SCREAMING_SNAKE_CASE__ : str = model(lowercase__ )
# take last logits
SCREAMING_SNAKE_CASE__ : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Dict = 8
SCREAMING_SNAKE_CASE__ : str = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE__ : Any = model(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = bark_model(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def _a ( lowercase__ : Dict , lowercase__ : Any , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
SCREAMING_SNAKE_CASE__ : int = BarkFineConfig.from_pretrained(os.path.join(lowercase__ , 'config.json' ) )
SCREAMING_SNAKE_CASE__ : List[str] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE__ : Dict = BarkSemanticModel.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = BarkCoarseModel.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BarkFineModel.from_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = EncodecModel.from_pretrained('facebook/encodec_24khz' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : int = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BarkModel(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = semantic
SCREAMING_SNAKE_CASE__ : str = coarseAcoustic
SCREAMING_SNAKE_CASE__ : int = fineAcoustic
SCREAMING_SNAKE_CASE__ : Optional[Any] = codec
SCREAMING_SNAKE_CASE__ : str = bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
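# The state-dict fixup inside _load_model, shown in isolation: strip the
# torch.compile "_orig_mod." prefix, then apply the suno -> HF layer-name
# substitutions (a toy state dict and a subset of the rename table).
state_dict = {"_orig_mod.transformer.h.0.attn.c_attn.weight": 0}
renames = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
unwanted_prefix = "_orig_mod."
for k in list(state_dict):
    new_k = k[len(unwanted_prefix):] if k.startswith(unwanted_prefix) else k
    for old, new in renames.items():
        new_k = new_k.replace(old, new)
    state_dict[new_k] = state_dict.pop(k)

assert "layers.0.attn.att_proj.weight" in state_dict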
| 85 |
"""
Preprocessing script before distillation: tokenize the corpus once, up front, and
dump the resulting token ids to a pickle file (tokenization + token_to_ids).
"""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit into uint16 as long as the vocabulary is smaller than 2**16
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
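
# Illustrative invocation sketch (paths assumed, not part of the original script):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text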
| 676 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (contiguous substrings) of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
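
# Worked example (illustrative, in the module's doctest style):
# create_ngram("I am a sentence", 3)[:3] == ["I a", " am", "am "]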
if __name__ == "__main__":
    from doctest import testmod

    testmod() | 86 |
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
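
# Minimal usage sketch (illustrative values; not part of the original module):
# config = MvpConfig(encoder_layers=6, decoder_layers=6, d_model=512)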
| 676 | 0 |
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Elementwise ELU: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
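
# Worked example (illustrative): negative inputs saturate toward -alpha, e.g. with alpha=1.0
# exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), 1.0) -> array([-0.63212056, 0., 2.])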
| 87 |
"""Tokenization class for the BertGeneration model, backed by SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a list of SentencePiece sub-word strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
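
# Minimal usage sketch (illustrative; requires an actual SentencePiece model file on disk):
# tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
# ids = tokenizer("hello world").input_ids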
| 676 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=A_ ):
__UpperCAmelCase = ['''torch''', '''scipy''']
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> str:
requires_backends(self , ["""torch""", """scipy"""])
@classmethod
def UpperCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Optional[int]:
requires_backends(cls , ["""torch""", """scipy"""])
@classmethod
def UpperCamelCase_ ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> int:
requires_backends(cls , ["""torch""", """scipy"""])
| 88 |
"""Evaluate a polynomial given as a sequence of coefficients, lowest degree first."""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Naive evaluation: sum c_i * x**i over all coefficients."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Horner's scheme: fold the coefficients from the highest degree down."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
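
# Horner's rule rewrites c0 + c1*x + c2*x**2 + ... as (...(cn*x + c(n-1))*x + ...)*x + c0,
# so it needs only one multiplication per coefficient. Worked check (illustrative):
# horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) == 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 == 79800.0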
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 676 | 0 |
"""Speech processor class for Wav2Vec2: wraps a feature extractor and a CTC tokenizer."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
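
# Minimal usage sketch (checkpoint name illustrative; `waveform` is a 1-D float array):
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# batch = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")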
| 89 |
"""Image/Text processor class for AltCLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
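
# Minimal usage sketch (checkpoint and inputs illustrative):
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")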
| 676 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True) | 90 |
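
# Minimal usage sketch (assumes the transformers agents/tools runtime is available):
# translator = TranslationTool()
# translator("How are you?", src_lang="English", tgt_lang="French")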
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
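
# Minimal usage sketch (checkpoint and image URL illustrative):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="microsoft/git-base")
# captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")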
| 676 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _snake_case ( snake_case__ : Tuple , snake_case__ : int ):
A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
A = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
A = transform(snake_case__ ).unsqueeze(0 ).to(snake_case__ )
return image
def _snake_case ( snake_case__ : Optional[Any] ):
if "visual_encoder" in key:
A = re.sub('visual_encoder*' , 'vision_model.encoder' , snake_case__ )
if "blocks" in key:
A = re.sub(r'blocks' , 'layers' , snake_case__ )
if "attn" in key:
A = re.sub(r'attn' , 'self_attn' , snake_case__ )
if "norm1" in key:
A = re.sub(r'norm1' , 'layer_norm1' , snake_case__ )
if "norm2" in key:
A = re.sub(r'norm2' , 'layer_norm2' , snake_case__ )
if "encoder.norm" in key:
A = re.sub(r'encoder.norm' , 'post_layernorm' , snake_case__ )
if "encoder.patch_embed.proj" in key:
A = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , snake_case__ )
if "encoder.pos_embed" in key:
A = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , snake_case__ )
if "encoder.cls_token" in key:
A = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , snake_case__ )
if "self_attn" in key:
A = re.sub(r'self_attn.proj' , 'self_attn.projection' , snake_case__ )
return key
@torch.no_grad()
def _snake_case ( snake_case__ : Any , snake_case__ : str=None ):
if config_path is not None:
A = BlipConfig.from_pretrained(snake_case__ )
else:
A = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
A = BlipForConditionalGeneration(snake_case__ ).eval()
A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
A = blip_decoder(pretrained=snake_case__ , image_size=384 , vit='base' )
A = pt_model.eval()
A = pt_model.state_dict()
for key in modified_state_dict.copy():
A = modified_state_dict.pop(snake_case__ )
A = rename_key(snake_case__ )
A = value
hf_model.load_state_dict(snake_case__ )
A = 384
A = load_demo_image(image_size=snake_case__ , device='cpu' )
A = BertTokenizer.from_pretrained('bert-base-uncased' )
A = tokenizer(['a picture of'] ).input_ids
A = hf_model.generate(snake_case__ , snake_case__ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
A = hf_model.generate(snake_case__ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
A = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
A = blip_vqa(pretrained=snake_case__ , image_size=snake_case__ , vit='base' )
vqa_model.eval()
A = vqa_model.state_dict()
for key in modified_state_dict.copy():
A = modified_state_dict.pop(snake_case__ )
A = rename_key(snake_case__ )
A = value
A = BlipForQuestionAnswering(snake_case__ )
hf_vqa_model.load_state_dict(snake_case__ )
A = ['How many dogs are in this image?']
A = tokenizer(snake_case__ , return_tensors='pt' ).input_ids
A = hf_vqa_model.generate(snake_case__ , snake_case__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
A = blip_itm(pretrained=snake_case__ , image_size=snake_case__ , vit='base' )
itm_model.eval()
A = itm_model.state_dict()
for key in modified_state_dict.copy():
A = modified_state_dict.pop(snake_case__ )
A = rename_key(snake_case__ )
A = value
A = BlipForImageTextRetrieval(snake_case__ )
A = ['A picture of a woman with a dog sitting in a beach']
A = tokenizer(
snake_case__ , return_tensors='pt' , padding='max_length' , truncation=snake_case__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case__ )
hf_itm_model.eval()
A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ )
A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_lowercase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 91 |
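
# Illustrative invocation sketch (paths assumed; requires the cloned salesforce/BLIP repo on PYTHONPATH):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base --config_path config.json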
"""Convert a TensorFlow BERT checkpoint into a PyTorch BertForPreTraining checkpoint."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
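
# Illustrative invocation sketch (paths assumed):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ./model.ckpt \
#       --bert_config_file ./bert_config.json --pytorch_dump_path ./pytorch_model.bin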
| 676 | 0 |
"""Neighborhood Attention Transformer (Nat) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
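
# Minimal usage sketch (values illustrative):
# config = NatConfig(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])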
| 92 |
"""AltCLIP model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
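
# Minimal usage sketch (illustrative): compose a full config from the two sub-configs.
# config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())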
| 676 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
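# A hedged sketch of the slice-comparison pattern used in the integration test
# above: pin a checkpoint by checking a small corner of its output against
# hard-coded reference values. The helper name is ours, not from the library.
def check_output_slice(output, expected_slice, atol=1e-4):
    # Compare the top-left 3x3 corner of the first batch element.
    assert torch.allclose(output[:, :3, :3], expected_slice, atol=atol)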
| 676 | 0 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file
):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
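# Hedged usage sketch (not part of the test suite): fsspec's chained URLs, as
# exercised by test_fs_isfile above, let you read an archive member directly.
# "./archive.zip" is a hypothetical local path, and the helper name is ours.
def read_zipped_jsonl_first_line(archive_path="./archive.zip"):
    with fsspec.open(f"zip://dataset.jsonl::{archive_path}", "rt", encoding="utf-8") as f:
        return f.readline()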
| 94 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
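# Hedged summary of the layout rules applied above when moving weights between
# frameworks; the helper names are ours, not part of the library API.
def flax_dense_kernel_to_pt_weight(kernel: np.ndarray) -> np.ndarray:
    # Flax Dense kernels are (in_features, out_features); PyTorch nn.Linear
    # weights are (out_features, in_features), hence the transpose.
    return kernel.T


def flax_conv_kernel_to_pt_weight(kernel: np.ndarray) -> np.ndarray:
    # Flax conv kernels are (H, W, in_ch, out_ch); PyTorch expects
    # (out_ch, in_ch, H, W), matching jnp.transpose(..., (3, 2, 0, 1)) above.
    return np.transpose(kernel, (3, 2, 0, 1))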
| 676 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
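# Hedged restatement of the classifier-free guidance step above, isolated as a
# standalone helper (the function name is ours).
def classifier_free_guidance(eps: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds conditional predictions in its first half and
    # unconditional ones in its second half, as constructed in __call__.
    cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)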
| 95 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676 | 0 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for ``year`` using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
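# Hedged check against a known date: Easter Sunday 2000 fell on 23 April,
# which the formula above reproduces (days_to_add = 29, offset to Sunday = 3).
assert gauss_easter(2000) == datetime(2000, 4, 23)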
| 96 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
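# Hedged usage sketch: writing a small in-memory dataset as JSON Lines via
# Dataset.to_json, which routes through the writer above. The output file name
# and helper name are ours.
def example_to_json_usage():
    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_json("out.jsonl", orient="records", lines=True)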
| 97 |
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between two words."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
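# Hedged sanity checks: the classic Levenshtein pair, plus a pure-insertion case.
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3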
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere (curved surface plus base)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with ``sides`` sides of ``length``."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
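# Hedged spot checks against closed forms.
assert area_square(10) == 100
assert abs(area_circle(1) - pi) < 1e-12
assert area_triangle_three_sides(3, 4, 5) == area_triangle(3, 4)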
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 98 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """Return ``True`` if ``number``'s square ends in ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
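# Hedged checks: 5**2 = 25 and 76**2 = 5776 end in the number itself
# (automorphic numbers), while 7**2 = 49 does not.
assert is_automorphic_number(5) is True
assert is_automorphic_number(76) is True
assert is_automorphic_number(7) is False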
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
def __call__( self , __A = None , __A = None , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , __A = None , **__A , ):
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
__a = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
else:
__a = None
if audio_target is not None:
__a = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
if inputs is None:
return inputs_target
else:
__a = inputs_target["""input_values"""]
__a = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
__a = decoder_attention_mask
return inputs
def snake_case_ ( self , __A , __A = False , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , **__A , ):
__a = isinstance(__A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(__A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__a = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__a = speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [speech]
# needed to make pad() work on spectrogram inputs
__a = self.feature_size
# convert into correct format for padding
if is_target:
__a = [self._extract_mel_features(__A ) for waveform in speech]
__a = BatchFeature({"""input_values""": features} )
__a = self.num_mel_bins
else:
__a = BatchFeature({"""input_values""": speech} )
__a = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
__a = feature_size_hack
# convert input values to correct format
__a = padded_inputs["""input_values"""]
if not isinstance(input_values[0] , np.ndarray ):
__a = [np.asarray(__A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(__A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__a = [array.astype(np.floataa ) for array in input_values]
elif isinstance(__A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__a = input_values.astype(np.floataa )
# convert attention_mask to correct format
__a = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__a = (
attention_mask
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__a = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] , attention_mask=__A , padding_value=self.padding_value )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(__A )
return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
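# Hedged restatement of the per-utterance normalization above as a standalone
# helper (the name is ours): zero mean, unit variance, with a small epsilon
# for numerical stability.
def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)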
| 99 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` is not already used in the row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first empty cell (marked 0), or ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place with backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
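# Hedged spot checks on the unsolved grid: 1 violates no row, column, or box
# constraint at (0, 1), while 3 already appears in row 0.
assert is_safe(initial_grid, 0, 1, 1) is True
assert is_safe(initial_grid, 0, 1, 3) is False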
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_A : Optional[int] = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
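# Worked example (editorial, not part of the metric): with pred_label = [0, 1, 1] and
# label = [0, 1, 0] over num_labels=2, class 0 has intersection 1 and union 2 and class 1
# has intersection 1 and union 2, so per-category IoU is [0.5, 0.5] and mean IoU is 0.5.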
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        return mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
| 100 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =prediction_length
lowerCamelCase_ =context_length or prediction_length
lowerCamelCase_ =distribution_output
lowerCamelCase_ =loss
lowerCamelCase_ =input_size
lowerCamelCase_ =num_time_features
lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ =scaling
lowerCamelCase_ =num_dynamic_real_features
lowerCamelCase_ =num_static_real_features
lowerCamelCase_ =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase_ =cardinality
else:
lowerCamelCase_ =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowerCamelCase_ =embedding_dimension
else:
lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ =num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =use_cache
# Informer
lowerCamelCase_ =attention_type
lowerCamelCase_ =sampling_factor
lowerCamelCase_ =distil
super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
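# Worked example (editorial; the values are chosen for illustration): with input_size=1,
# the default lags_sequence of length 7, num_time_features=2, and no static/dynamic real
# features or categorical embeddings, _number_of_features is 0 + 0 + 2 + 0 + 1 * 2 = 4
# (the last term being the log1p(abs(loc)) and log(scale) features), so the model input
# dimension (feature_size) is 1 * 7 + 4 = 11.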
| 676 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def a__ ( A__, A__, A__, A__, A__ ):
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(A__, A__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(A__, A__ ).shape
else:
SCREAMING_SNAKE_CASE_ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : str = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE_ : List[Any] = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
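# Matching examples (editorial): "text_encoder_prenet.*" ignores
# "text_encoder_prenet.encoder_prenet.0.weight" through the prefix branch, while
# "encoder.layers.*.norm_k.weight" ignores "encoder.layers.3.norm_k.weight" through the
# ".*." prefix/suffix split.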
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
if task == "s2t":
SCREAMING_SNAKE_CASE_ : str = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE_ : Optional[int] = MAPPING_S2T
SCREAMING_SNAKE_CASE_ : int = IGNORE_KEYS_S2T
elif task == "t2s":
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : Dict = MAPPING_T2S
SCREAMING_SNAKE_CASE_ : Any = IGNORE_KEYS_T2S
elif task == "s2s":
SCREAMING_SNAKE_CASE_ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE_ : List[str] = MAPPING_S2S
SCREAMING_SNAKE_CASE_ : int = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(A__, A__ ):
logger.info(F'''{name} was ignored''' )
continue
SCREAMING_SNAKE_CASE_ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
A__, A__, A__, A__, hf_model.config.feat_extract_norm == 'group', )
SCREAMING_SNAKE_CASE_ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = key.split('.*.' )
if prefix in name and suffix in name:
SCREAMING_SNAKE_CASE_ : Any = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : str = name.split(A__ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE_ : List[Any] = mapped_key.replace('*', A__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE_ : str = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE_ : str = 'weight'
elif "running_mean" in name:
SCREAMING_SNAKE_CASE_ : str = 'running_mean'
elif "running_var" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = 'running_var'
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE_ : int = 'num_batches_tracked'
else:
SCREAMING_SNAKE_CASE_ : List[str] = None
set_recursively(A__, A__, A__, A__, A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a__ ( A__, A__, A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Tuple = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE_ : Optional[int] = name.split('.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(items[0] )
SCREAMING_SNAKE_CASE_ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ : Tuple = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A__ )
@torch.no_grad()
def a__ ( A__, A__, A__, A__=None, A__=None, A__=None, ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ : Any = SpeechTaConfig.from_pretrained(A__ )
else:
SCREAMING_SNAKE_CASE_ : int = SpeechTaConfig()
if task == "s2t":
SCREAMING_SNAKE_CASE_ : List[str] = config.max_text_positions
SCREAMING_SNAKE_CASE_ : List[Any] = SpeechTaForSpeechToText(A__ )
elif task == "t2s":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_8_7_6
SCREAMING_SNAKE_CASE_ : int = 6_0_0
SCREAMING_SNAKE_CASE_ : int = config.max_speech_positions
SCREAMING_SNAKE_CASE_ : Optional[int] = SpeechTaForTextToSpeech(A__ )
elif task == "s2s":
SCREAMING_SNAKE_CASE_ : int = 1_8_7_6
SCREAMING_SNAKE_CASE_ : List[str] = config.max_speech_positions
SCREAMING_SNAKE_CASE_ : List[Any] = SpeechTaForSpeechToSpeech(A__ )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
SCREAMING_SNAKE_CASE_ : Dict = SpeechTaTokenizer(A__, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken('<mask>', lstrip=A__, rstrip=A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
SCREAMING_SNAKE_CASE_ : Optional[int] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : Tuple = SpeechTaProcessor(tokenizer=A__, feature_extractor=A__ )
processor.save_pretrained(A__ )
SCREAMING_SNAKE_CASE_ : Any = torch.load(A__ )
recursively_load_weights(fairseq_checkpoint['model'], A__, A__ )
model.save_pretrained(A__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(A__ )
model.push_to_hub(A__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
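# Example invocation (editorial sketch; the script name and paths are placeholders):
#
#     python convert_speecht5_checkpoint.py --task t2s \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --vocab_path /path/to/spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts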
| 101 |
"""Project Euler problem 50: find the prime below one million that is the sum of the most consecutive primes."""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers; return all primes below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes

def solution(ceiling: int = 100_0000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
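# Performance note (editorial): ``sol in primes`` scans a Python list, so each membership
# test is O(n). Building ``primes_set = set(primes)`` once and testing ``sol in primes_set``
# makes the test O(1) on average without changing the result.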
| 676 | 0 |
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 100_0000) -> int:
    """Count the square laminae that can be formed using up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
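# Worked example (editorial): for outer_width = 6 and limit = 32, the smallest legal hole
# has width ceil(sqrt(36 - 32)) = 2, holes of widths 2 and 4 both fit, and
# (6 - 2 - 2) // 2 + 1 == 2 counts exactly those two laminae.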
if __name__ == "__main__":
print(f'''{solution() = }''')
| 102 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
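# Minimal usage sketch (editorial; the checkpoint id is illustrative):
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]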
| 676 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class UpperCAmelCase :
def __init__( self : Any ):
"""simple docstring"""
_snake_case = []
_snake_case = 0
_snake_case = 0
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return self.head == self.tail
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Any ):
"""simple docstring"""
self.data.append(__lowerCamelCase )
_snake_case = self.tail + 1
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.data[self.head]
_snake_case = self.head + 1
return ret
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return self.tail - self.head
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class UpperCAmelCase :
def __init__( self : Union[str, Any] , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = data
_snake_case = None
_snake_case = None
_snake_case = 1
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return self.data
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return self.left
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.right
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return self.height
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = data
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : MyNode | None ):
"""simple docstring"""
_snake_case = node
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : MyNode | None ):
"""simple docstring"""
_snake_case = node
def __UpperCAmelCase ( self : int , __lowerCamelCase : int ):
"""simple docstring"""
_snake_case = height
def snake_case ( lowerCAmelCase_ ) -> int:
if node is None:
return 0
return node.get_height()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if a > b:
return a
return b
def snake_case ( lowerCAmelCase_ ) -> MyNode:
print('''left rotation node:''' , node.get_data() )
_snake_case = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase_ )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase_ )
return ret
def snake_case ( lowerCAmelCase_ ) -> MyNode:
print('''right rotation node:''' , node.get_data() )
_snake_case = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase_ )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
_snake_case = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase_ )
return ret
def snake_case ( lowerCAmelCase_ ) -> MyNode:
_snake_case = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase_ ) )
return right_rotation(lowerCAmelCase_ )
def snake_case ( lowerCAmelCase_ ) -> MyNode:
_snake_case = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase_ ) )
return left_rotation(lowerCAmelCase_ )
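# Rotation cheat sheet (editorial): a left-left imbalance needs one right rotation, a
# right-right imbalance one left rotation; left-right rotates the left child left and then
# the node right (lr_rotation above), and right-left is the mirror case (rl_rotation).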
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None:
if node is None:
return MyNode(lowerCAmelCase_ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase_ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_snake_case = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_snake_case = right_rotation(lowerCAmelCase_ )
else:
_snake_case = lr_rotation(lowerCAmelCase_ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase_ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_snake_case = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_snake_case = rl_rotation(lowerCAmelCase_ )
else:
_snake_case = left_rotation(lowerCAmelCase_ )
_snake_case = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
return node
def snake_case ( lowerCAmelCase_ ) -> Any:
while True:
_snake_case = root.get_right()
if right_child is None:
break
_snake_case = right_child
return root.get_data()
def snake_case ( lowerCAmelCase_ ) -> Any:
while True:
_snake_case = root.get_left()
if left_child is None:
break
_snake_case = left_child
return root.get_data()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None:
_snake_case = root.get_left()
_snake_case = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_snake_case = get_left_most(lowerCAmelCase_ )
root.set_data(lowerCAmelCase_ )
root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
elif left_child is not None:
_snake_case = left_child
elif right_child is not None:
_snake_case = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
if get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_snake_case = left_rotation(lowerCAmelCase_ )
else:
_snake_case = rl_rotation(lowerCAmelCase_ )
elif get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_snake_case = right_rotation(lowerCAmelCase_ )
else:
_snake_case = lr_rotation(lowerCAmelCase_ )
_snake_case = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase_ )
return root
class UpperCAmelCase :
def __init__( self : int ):
"""simple docstring"""
_snake_case = None
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
return get_height(self.root )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Any ):
"""simple docstring"""
print('''insert:''' + str(__lowerCamelCase ) )
_snake_case = insert_node(self.root , __lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : Any ):
"""simple docstring"""
print('''delete:''' + str(__lowerCamelCase ) )
if self.root is None:
print('''Tree is empty!''' )
return
_snake_case = del_node(self.root , __lowerCamelCase )
def __str__( self : str , ): # a level traversale, gives a more intuitive look on the tree
"""simple docstring"""
_snake_case = ''''''
_snake_case = MyQueue()
q.push(self.root )
_snake_case = self.get_height()
if layer == 0:
return output
_snake_case = 0
while not q.is_empty():
_snake_case = q.pop()
_snake_case = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__lowerCamelCase )
q.push(__lowerCamelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_snake_case = cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , __lowerCamelCase ) - 1:
_snake_case = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def snake_case ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 103 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return ``number + 2`` if both ``number`` and ``number + 2`` are prime, else -1."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
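# Editorial examples: twin_prime(3) returns 5 because (3, 5) is a twin-prime pair, while
# twin_prime(4) returns -1 because 4 is not prime.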
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply max pooling with a ``size`` x ``size`` window moved by ``stride`` pixels."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply average pooling with a ``size`` x ``size`` window moved by ``stride`` pixels."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the (integer-truncated) average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
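# Shape check (editorial): a 4x4 input with size=2 and stride=2 yields a
# (4 - 2) // 2 + 1 = 2 by 2 output, e.g.
#
#     maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#     # -> [[ 6.,  8.], [14., 16.]]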
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 104 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : torch.FloatTensor
lowercase : torch.FloatTensor
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : Tuple =1
@register_to_config
def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
"""simple docstring"""
lowerCamelCase_ =sigma_max
# setable values
lowerCamelCase_ =None
self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCamelCase_ =timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCamelCase_ =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCamelCase_ =diffusion.unsqueeze(-1 )
lowerCamelCase_ =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCamelCase_ =randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCamelCase_ =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCamelCase_ =step_size.unsqueeze(-1 )
lowerCamelCase_ =sample + step_size * model_output
lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =timesteps.to(original_samples.device )
lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCamelCase_ =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
lowerCamelCase_ =noise + original_samples
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
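# Editorial note: the prediction step above implements, with zero drift,
# x_{t-1} = x_t + g_t**2 * score + g_t * z with g_t**2 = sigma_t**2 - sigma_{t-1}**2
# (equation 6 of Song et al., 2021); the correction step then nudges the sample along
# the score with a step size derived from the configured signal-to-noise ratio.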
| 676 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 10_24,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
__a : Optional[int] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_VOCAB_FILES_MAP
__a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[Any] = ["input_ids", "attention_mask"]
__a : Dict = MvpTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__="replace" ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<s>" ,snake_case__="<unk>" ,snake_case__="<pad>" ,snake_case__="<mask>" ,snake_case__=False ,snake_case__=True ,**snake_case__ ,):
super().__init__(
snake_case__ ,snake_case__ ,tokenizer_file=snake_case__ ,errors=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,sep_token=snake_case__ ,cls_token=snake_case__ ,unk_token=snake_case__ ,pad_token=snake_case__ ,mask_token=snake_case__ ,add_prefix_space=snake_case__ ,trim_offsets=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : Tuple = getattr(snake_case__ ,pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : Tuple = add_prefix_space
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE_ : Optional[int] = 'post_processor'
SCREAMING_SNAKE_CASE_ : str = getattr(self.backend_tokenizer ,snake_case__ ,snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_ : List[str] = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(state['cls'] )
SCREAMING_SNAKE_CASE_ : List[Any] = False
if state.get('add_prefix_space' ,snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : Any = add_prefix_space
SCREAMING_SNAKE_CASE_ : str = True
if state.get('trim_offsets' ,snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE_ : Optional[int] = trim_offsets
SCREAMING_SNAKE_CASE_ : str = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(snake_case__ ,state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer ,snake_case__ ,snake_case__ )
@property
def snake_case ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else value
SCREAMING_SNAKE_CASE_ : Tuple = value
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.get('is_split_into_words' ,snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ ,**snake_case__ )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('is_split_into_words' ,snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ ,**snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._tokenizer.model.save(snake_case__ ,name=snake_case__ )
return tuple(snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
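# Minimal usage sketch (editorial; upstream this class is MvpTokenizerFast and the
# checkpoint id comes from the pretrained map above):
#
#     from transformers import MvpTokenizerFast
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     input_ids = tokenizer("Summarize: ...")["input_ids"]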
| 105 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` through ``iterations`` and return the rounds as one string."""
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
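# Editorial example: fizz_buzz(1, 7) returns "1 2 Fizz 4 Buzz Fizz 7 "
# (a space is appended after every round, including the last).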
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
A_ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A_ : Dict = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A_ : int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict ) -> Any:
A = ZeroShotClassificationPipeline(
model=__UpperCamelCase , tokenizer=__UpperCamelCase , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] ) -> int:
A = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
# No kwarg
A = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
A = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
A = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
A = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
A = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(__UpperCamelCase , {'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
A = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
__UpperCamelCase , [
{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(1 )
] , )
A = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
__UpperCamelCase , [
{'sequence': ANY(__UpperCamelCase ), 'labels': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], 'scores': [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(__UpperCamelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(__UpperCamelCase ):
classifier(__UpperCamelCase , candidate_labels='politics' )
with self.assertRaises(__UpperCamelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(__UpperCamelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=__UpperCamelCase )
with self.assertRaises(__UpperCamelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(__UpperCamelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=__UpperCamelCase , )
self.run_entailment_id(__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : Pipeline ) -> Any:
A = zero_shot_classifier.model.config
A = config.labelaid
A = zero_shot_classifier.entailment_id
A = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
A = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
A = original_labelaid
self.assertEqual(__UpperCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
A = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
A = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
A = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def __UpperCamelCase ( self : int ) -> Dict:
A = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
A = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
A = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
A = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
A = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def __UpperCamelCase ( self : List[str] ) -> Any:
A = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
A = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
A = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , ) | 106 |
'''simple docstring'''
from typing import List
import numpy as np
def a_ ( __snake_case : dict ) -> int:
"""simple docstring"""
lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
            + '''we found several data source lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
lowerCamelCase_ =max(lists_lengths.values() , default=0 )
return max(1 , __snake_case )
def a_ ( __snake_case : int , __snake_case : int ) -> List[range]:
"""simple docstring"""
lowerCamelCase_ =[]
for group_idx in range(__snake_case ):
lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowerCamelCase_ =range(__snake_case , start + num_shards_to_add )
shards_indices_per_group.append(__snake_case )
return shards_indices_per_group
def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]:
"""simple docstring"""
lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case )
if num_shards == 1:
return [dict(__snake_case )]
else:
lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__snake_case , __snake_case )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__snake_case ) )
]
def a_ ( __snake_case : List[dict] ) -> dict:
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __snake_case )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict:
"""simple docstring"""
lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )}
lowerCamelCase_ ={}
for size in list_sizes:
lowerCamelCase_ =list(range(__snake_case ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowerCamelCase_ =dict(__snake_case )
for key, value in shuffled_kwargs.items():
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]]
return shuffled_kwargs
| 676 | 0 |
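# A minimal, readable sketch of the shard-distribution logic above, assuming the
# intent is to split `num_shards` as evenly as possible over at most
# `max_num_jobs` contiguous groups; the names below are illustrative.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups: list[range] = []
    for group_idx in range(max_num_jobs):
        # The first (num_shards % max_num_jobs) groups each take one extra shard.
        shards_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if shards_to_add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + shards_to_add))
    return groups

# distribute_shards(10, 3) -> [range(0, 4), range(4, 7), range(7, 10)]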
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
_A = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
_A = json.loads(__snake_case )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
_A = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
_A = json.loads(__snake_case )
if not mpi_options.get('sagemaker_mpi_enabled' , __snake_case ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
__lowerCAmelCase = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.', UpperCamelCase__, )
@cached_property
def __UpperCAmelCase ( self : str ) -> "torch.device":
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`' )
if self.no_cuda:
_A = torch.device('cpu' )
_A = 0
elif is_sagemaker_model_parallel_available():
_A = smp.local_rank()
_A = torch.device('cuda', UpperCamelCase__ )
_A = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta )
_A = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
_A = torch.device('cuda', self.local_rank )
_A = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
_A = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
_A = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta )
_A = torch.device('cuda', self.local_rank )
_A = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase__ )
return device
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __UpperCAmelCase ( self : Any ) -> int:
return not is_sagemaker_model_parallel_available()
@property
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
return False
| 107 |
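# The environment-variable probe above, condensed: SageMaker model parallelism is
# treated as available only when SM_HP_MP_PARAMETERS carries a "partitions" field,
# SM_FRAMEWORK_PARAMS enables MPI, and `smdistributed` is importable. A sketch
# that assumes the same variable names as the code above.
import importlib.util
import json
import os

def sagemaker_mp_available() -> bool:
    try:
        smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
        mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
    except json.JSONDecodeError:
        return False
    if "partitions" not in smp_options:
        return False
    if not mpi_options.get("sagemaker_mpi_enabled", False):
        return False
    return importlib.util.find_spec("smdistributed") is not None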
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : int = logging.getLogger(__name__)
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
lowerCamelCase_ =parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowerCamelCase_ =fp.readlines()
logger.info('''Start encoding''' )
logger.info(F'''{len(__snake_case )} examples to process.''' )
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =1_0000
lowerCamelCase_ =time.time()
for text in data:
lowerCamelCase_ =F'''{bos} {text.strip()} {sep}'''
lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
rslt.append(__snake_case )
iter += 1
if iter % interval == 0:
lowerCamelCase_ =time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
lowerCamelCase_ =time.time()
logger.info('''Finished binarization''' )
logger.info(F'''{len(__snake_case )} examples processed.''' )
lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
lowerCamelCase_ =tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt]
else:
lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(__snake_case , '''wb''' ) as handle:
pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 676 | 0 |
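# The dtype decision above in a standalone form: token ids fit into two bytes
# whenever the vocabulary is smaller than 2**16, which roughly halves the size
# of the pickled dump. Names here are illustrative.
import numpy as np

def pack_token_ids(sequences: list[list[int]], vocab_size: int) -> list[np.ndarray]:
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return [np.array(seq, dtype=dtype) for seq in sequences]

# pack_token_ids([[101, 2023, 102]], vocab_size=30522)[0].dtype -> dtype('uint16')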
import re
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> bool:
_UpperCAmelCase = re.compile(
r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
return bool(re.search(__snake_case , __snake_case ) )
if __name__ == "__main__":
__a: Optional[Any] = '''0094702343221'''
print(is_sri_lankan_phone_number(phone)) | 108 |
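# A few spot checks for the validator above, assuming its original name
# `is_sri_lankan_phone_number` (the name the __main__ block already calls).
# Expected results follow from the regex: an optional 0/94/+94/0094 prefix,
# a mobile prefix 70-72 or 74-78, an optional separator, then seven digits.
assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718639535")
assert not is_sri_lankan_phone_number("0912343221")  # 9x is not an accepted prefix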
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] ='mvp'
lowercase : List[str] =['past_key_values']
lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =classifier_dropout
lowerCamelCase_ =use_cache
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =use_prompt
lowerCamelCase_ =prompt_length
lowerCamelCase_ =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
lowerCamelCase_ =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
| 676 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
a = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
a = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" ,id="""sequence""" ) ,id="""references""" ),
} ) ,codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] ,reference_urls=[
"""https://github.com/m-popovic/chrF""",
] ,)
def UpperCAmelCase__ ( self : str ,lowerCamelCase : List[str] ,lowerCamelCase : Any ,lowerCamelCase : int = CHRF.CHAR_ORDER ,lowerCamelCase : int = CHRF.WORD_ORDER ,lowerCamelCase : int = CHRF.BETA ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(references[0] )
if any(len(lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCamelCase )]
__SCREAMING_SNAKE_CASE = CHRF(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = sb_chrf.corpus_score(lowerCamelCase ,lowerCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 109 |
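# The reference transposition above, shown in isolation: the metric receives one
# list of references per prediction, while sacrebleu's CHRF wants one list per
# reference "stream". A small sketch with illustrative data.
predictions = ["the cat sat", "a dog ran"]
references = [["the cat sat down", "a cat sat"], ["the dog ran", "a dog ran fast"]]

refs_per_prediction = len(references[0])
transformed = [[refs[i] for refs in references] for i in range(refs_per_prediction)]
# transformed == [["the cat sat down", "the dog ran"], ["a cat sat", "a dog ran fast"]]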
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {"""vocab_file""": """spiece.model"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[int] =[]
lowercase : str =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 676 | 0 |
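# The token-to-string loop above, restated: pieces are buffered and flushed
# through the SentencePiece decoder whenever a special token appears, so special
# tokens survive decoding verbatim. `sp` and `special_tokens` are illustrative
# stand-ins for the processor and the special-token set.
def detokenize(tokens: list[str], sp, special_tokens: set[str]) -> str:
    out, buffer = "", []
    for token in tokens:
        if token in special_tokens:
            out += sp.decode(buffer) + token
            buffer = []
        else:
            buffer.append(token)
    return (out + sp.decode(buffer)).strip()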
'''simple docstring'''
from statistics import mean
import numpy as np
def snake_case__ ( _A: list , _A: list , _A: list , _A: int ) -> list:
'''simple docstring'''
lowerCAmelCase = 0
# Number of processes finished
lowerCAmelCase = 0
    # Tracks which processes have finished:
    # 0 means the process has not run to completion yet, 1 means it has.
lowerCAmelCase = [0] * no_of_process
# List to include calculation results
lowerCAmelCase = [0] * no_of_process
# Sort by arrival time.
lowerCAmelCase = [burst_time[i] for i in np.argsort(__snake_case )]
lowerCAmelCase = [process_name[i] for i in np.argsort(__snake_case )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowerCAmelCase = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowerCAmelCase = arrival_time[i]
lowerCAmelCase = 0
# Index showing the location of the process being performed
lowerCAmelCase = 0
# Saves the current response ratio.
lowerCAmelCase = 0
for i in range(0 , __snake_case ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
lowerCAmelCase = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowerCAmelCase = temp
lowerCAmelCase = i
# Calculate the turn around time
lowerCAmelCase = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowerCAmelCase = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def snake_case__ ( _A: list , _A: list , _A: list , _A: int ) -> list:
'''simple docstring'''
lowerCAmelCase = [0] * no_of_process
for i in range(0 , __snake_case ):
lowerCAmelCase = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__lowercase = 5
__lowercase = ["""A""", """B""", """C""", """D""", """E"""]
__lowercase = [1, 2, 3, 4, 5]
__lowercase = [1, 2, 3, 4, 5]
__lowercase = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__lowercase = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
| 370 |
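# The selection rule the scheduler above implements, stated directly: at every
# step, run the ready process with the highest response ratio
# (waiting_time + burst_time) / burst_time. A tiny illustrative computation.
def response_ratio(current_time: int, arrival_time: int, burst_time: int) -> float:
    waiting_time = current_time - arrival_time
    return (waiting_time + burst_time) / burst_time

# At t=10, a process that arrived at t=2 with burst 4 has ratio (8 + 4) / 4 == 3.0
assert response_ratio(10, 2, 4) == 3.0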
'''simple docstring'''
from collections.abc import Sequence
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(__snake_case ) )
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
lowerCamelCase_ =0.0
for coeff in reversed(__snake_case ):
lowerCamelCase_ =result * x + coeff
return result
if __name__ == "__main__":
a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
a_ : Tuple = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 676 | 0 |
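# Why the Horner form above is preferred: it evaluates a degree-n polynomial with
# n multiplications and no explicit exponentiation, instead of recomputing x**i
# for every term. A quick agreement check on the same inputs used above.
poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 7x^4 + 9.3x^3 + 5x^2
x = 10.0
naive = sum(c * x**i for i, c in enumerate(poly))
result = 0.0
for coeff in reversed(poly):
    result = result * x + coeff
assert abs(naive - result) < 1e-9
assert round(result, 6) == 79800.0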
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: int ) -> List[str]:
_A = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_A = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
_A = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
_A = tf_top_k_top_p_filtering(__A , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_A = output[output != -float('''inf''' )]
_A = tf.cast(
tf.where(tf.not_equal(__A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__A , __A , rtol=1e-12 )
tf.debugging.assert_equal(__A , __A )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase , lowerCamelCase__ ):
"""simple docstring"""
if is_tf_available():
A_ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def __A ( self: Optional[Any] ) -> Any:
_A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_A = 2
_A = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__( self: Dict , __A: Optional[int] ) -> int:
super(__A , self ).__init__()
_A = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__A , )
def __A ( self: Any , __A: str , __A: Dict ) -> int:
_A = self.model.generate(
input_ids=__A , attention_mask=__A , max_new_tokens=__A , return_dict_in_generate=__A , )
return {"sequences": outputs["sequences"]}
_A = [[2, 0], [1_02, 1_03]]
_A = [[1, 0], [1, 1]]
_A = DummyModel(model=__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__A , __A , signatures={'''serving_default''': dummy_model.serving} )
_A = tf.saved_model.load(__A ).signatures['''serving_default''']
for batch_size in range(1 , len(__A ) + 1 ):
_A = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
_A = serving_func(**__A )['''sequences''']
_A = test_model.generate(**__A , max_new_tokens=__A )
tf.debugging.assert_equal(__A , __A )
@slow
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_A = 1
_A = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__( self: Any , __A: Optional[int] ) -> Any:
super(__A , self ).__init__()
_A = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__A , )
def __A ( self: List[str] , __A: List[Any] , __A: Tuple ) -> Optional[int]:
_A = self.model.generate(
input_ids=__A , attention_mask=__A , max_new_tokens=__A , return_dict_in_generate=__A , )
return {"sequences": outputs["sequences"]}
_A = [[2], [1_02, 1_03]]
_A = [[1], [1, 1]]
_A = DummyModel(model=__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__A , __A , signatures={'''serving_default''': dummy_model.serving} )
_A = tf.saved_model.load(__A ).signatures['''serving_default''']
for input_row in range(len(__A ) ):
_A = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
_A = serving_func(**__A )['''sequences''']
_A = test_model.generate(**__A , max_new_tokens=__A )
tf.debugging.assert_equal(__A , __A )
@slow
@require_tensorflow_text
def __A ( self: List[Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=__A )
class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self: Dict ) -> Optional[Any]:
super().__init__()
_A = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__A , '''spiece.model''' ) , '''rb''' ).read() )
_A = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def __A ( self: int , __A: Any , *__A: List[str] , **__A: Optional[Any] ) -> List[Any]:
_A = self.tokenizer.tokenize(__A )
_A ,_A = text.pad_model_inputs(
__A , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_A = self.model.generate(input_ids=__A , attention_mask=__A )
return self.tokenizer.detokenize(__A )
_A = CompleteSentenceTransformer()
_A = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
_A = complete_model(__A )
_A = tf.keras.Model(__A , __A )
keras_model.save(__A )
def __A ( self: Tuple ) -> Dict:
_A = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
_A = 14
_A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_A = '''Hello, my dog is cute and'''
_A = tokenizer(__A , return_tensors='''tf''' )
_A = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_A = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
_A = model.generate(**__A , eos_token_id=__A , **__A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_A = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
_A = model.generate(**__A , eos_token_id=__A , **__A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __A ( self: Optional[Any] ) -> Tuple:
_A = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_A = '''Hugging Face is a technology company based in New York and Paris.'''
_A = bart_tokenizer(__A , return_tensors='''tf''' ).input_ids
_A = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_A = bart_model.generate(__A ).numpy()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
"""simple docstring"""
def __A ( self: Union[str, Any] , __A: int , __A: Dict=None , **__A: List[str] ) -> Optional[Any]:
return super().call(__A , **__A )
_A = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
_A = bart_model.generate(__A , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(__A , __A ) )
class SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def __A ( self: Tuple , __A: Tuple , **__A: Any ) -> List[Any]:
return super().call(__A , **__A )
_A = FakeEncoder(bart_model.config , bart_model.model.shared )
_A = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_A = bart_model.generate(__A ).numpy()
with self.assertRaises(__A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__A , foo='''bar''' )
| 484 |
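# What tf_top_k_top_p_filtering in the first test above checks, sketched in
# NumPy for one row of logits: keep the top-k entries and the smallest prefix of
# the sorted distribution whose probability mass reaches top_p, masking the rest
# to -inf. Illustrative only; the real TF implementation also honors
# min_tokens_to_keep and operates on batches.
import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.astype(np.float64)
    if top_k > 0:
        kth_largest = np.sort(out)[-min(top_k, out.size)]
        out[out < kth_largest] = -np.inf
    if top_p < 1.0:
        order = np.argsort(out)[::-1]
        probs = np.exp(out[order] - out[order][0])
        probs /= probs.sum()
        keep = np.searchsorted(np.cumsum(probs), top_p) + 1
        out[order[keep:]] = -np.inf
    return out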
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =['image_processor', 'tokenizer']
lowercase : str ='CLIPImageProcessor'
lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if text is not None and images is not None:
lowerCamelCase_ =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 676 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def __UpperCAmelCase ( __lowerCamelCase ) -> bool:
lowercase__ : Optional[Any] = 0
lowercase__ : List[Any] = number
while duplicate > 0:
lowercase__ , lowercase__ : int = divmod(__snake_case , 10 )
fact_sum += factorial(__snake_case )
return fact_sum == number
if __name__ == "__main__":
print('Program to check whether a number is a Krisnamurthy Number or not.')
lowerCAmelCase_ = int(input('Enter number: ').strip())
print(
F'''{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'''
)
| 560 |
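# The property checked above, in compact form: a Krishnamurthy (strong) number
# equals the sum of the factorials of its digits, e.g. 145 = 1! + 4! + 5!
# = 1 + 24 + 120.
from math import factorial

def is_krishnamurthy(number: int) -> bool:
    return number == sum(factorial(int(digit)) for digit in str(number))

assert is_krishnamurthy(145)
assert not is_krishnamurthy(144)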
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
requires_backends(self, '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={}
if prompt is not None:
lowerCamelCase_ =prompt
if generate_kwargs is not None:
lowerCamelCase_ =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCamelCase_ ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
lowerCamelCase_ =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
lowerCamelCase_ =self.model.config.model_type
if model_type == "git":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase_ =None
return model_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
lowerCamelCase_ =None
if generate_kwargs is None:
lowerCamelCase_ ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for output_ids in model_outputs:
lowerCamelCase_ ={
'''generated_text''': self.tokenizer.decode(
lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
}
records.append(lowerCAmelCase )
return records
| 676 | 0 |
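# Typical use of the pipeline above. The model name is a common public
# checkpoint chosen for illustration, not taken from this file; the output
# shape matches the postprocess step above.
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("path/or/url/to/image.png")
# -> [{"generated_text": "a cat sitting on a couch"}]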
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ['image_processor', 'tokenizer']
lowerCAmelCase__ : str = 'CLIPImageProcessor'
lowerCAmelCase__ : Optional[Any] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self: str , __lowerCAmelCase: Tuple=None , __lowerCAmelCase: List[str]=None , **__lowerCAmelCase: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCAmelCase , )
__UpperCAmelCase = kwargs.pop("feature_extractor" )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self: Any , __lowerCAmelCase: List[str]=None , __lowerCAmelCase: Union[str, Any]=None , __lowerCAmelCase: Any=None , **__lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def _UpperCAmelCase ( self: str , *__lowerCAmelCase: List[Any] , **__lowerCAmelCase: List[str] ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def _UpperCAmelCase ( self: Dict , *__lowerCAmelCase: Tuple , **__lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def _UpperCAmelCase ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer.model_input_names
__UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 221 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str:
"""simple docstring"""
# Initialise PyTorch model
lowerCamelCase_ =BertConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ =BertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 0 |
'''simple docstring'''
from math import pow, sqrt
def snake_case ( *snake_case : float ) -> bool:
"""simple docstring"""
lowerCAmelCase = len(__snake_case ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case ( snake_case : float , snake_case : float ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def snake_case ( snake_case : float , snake_case : float , snake_case : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def snake_case ( snake_case : float , snake_case : float , snake_case : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def snake_case ( snake_case : float , snake_case : float , snake_case : float ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def snake_case ( snake_case : float , snake_case : float , snake_case : float ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 284 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overridden.'''
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overridden.'''
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate an AltCLIPConfig from a text config and a vision config."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a python dict, nesting the sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
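
# Minimal composition sketch (not part of the original file; shows how the
# classmethod above is meant to be used -- all values are defaults):
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=768)
#     assert config.to_dict()["model_type"] == "altclip"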
| 676 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieve(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieve(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 614 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 676 | 0 |
"""Dynamic-programming check: can some subset of `arr` sum to `required_sum`?"""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
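    # Worked example (assumed, not from the original file): with
    # arr = [3, 34, 4, 12, 5, 2], a subset summing to 9 exists (4 + 5),
    # while no subset sums to 30.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False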
| 126 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(F'''Loading PyTorch weights from {pt_path}''')
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.''')
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping the tensor when necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Check whether `key` or `(model_prefix,) + key` is present in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict into the Flax parameter layout."""
# convert pytorch tensor to numpy
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
lowerCamelCase_ ={}
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (a list of .pt shard files) into the Flax parameter layout."""
import torch
# Load the index
lowerCamelCase_ ={}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase_ =torch.load(__snake_case )
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
lowerCamelCase_ =flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''')
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''')
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model.")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =pt_model.state_dict()
lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase_ =[]
lowerCamelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase_ ='''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase_ ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase_ =key.split('''.''' )
lowerCamelCase_ =None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase_ =key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase_ =key_components[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =key_components[:-3] + [name]
lowerCamelCase_ ='''.'''.join(__snake_case )
lowerCamelCase_ =key
if flax_key in special_pt_names:
lowerCamelCase_ =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
lowerCamelCase_ =torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
lowerCamelCase_ =list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__snake_case ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
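
# Weight-layout conventions the renaming logic above relies on (illustrative
# shapes, not part of the original module):
#
#     pt_conv = np.zeros((8, 3, 5, 5))            # PyTorch Conv2d weight: (out, in, h, w)
#     flax_conv = pt_conv.transpose(2, 3, 1, 0)   # Flax Conv kernel: (h, w, in, out)
#     pt_linear = np.zeros((16, 32))              # PyTorch Linear weight: (out, in)
#     flax_linear = pt_linear.T                   # Flax Dense kernel: (in, out)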
| 676 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 129 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; leftover characters are appended in order."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 676 | 0 |
"""TF utility functions: shape handling, masking, flattening, and HDF5 attribute (de)serialization."""
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging

logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions and falling back to dynamic ones."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Stable wrapper around tf.nn.softmax that adds a tiny epsilon to the logits."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Functional layer normalization over a single `axis`."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    """Replicate the behavior of torch.flatten in TF."""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask and extend it to the broadcastable 4D shape."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Assert that all token ids are smaller than the embedding layer's input dimension."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding """
            f"""layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes `data` under `name` into the HDF5 group, chunking when they exceed the header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attributes `name` from the HDF5 group, reassembling chunked attributes."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand 1-dimensional tensors into 2-dimensional ones."""
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
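
# Usage sketch (assumed, not from the original module): `shape_list` mixes
# static python ints with dynamic scalar tensors, and `flatten` mirrors
# torch.flatten. Run eagerly:
#
#     x = tf.random.uniform((2, 3, 4))
#     shape_list(x)            # [2, 3, 4] -- all dimensions are static here
#     y = flatten(x, start_dim=1)
#     shape_list(y)            # [2, 12]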
| 565 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCAmelCase__ :List[Any] = F"Input value of [number={number}] must be an integer"
raise TypeError(__snake_case )
if number < 0:
return False
lowerCAmelCase__ :Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
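    # A few assumed spot checks: 5*5 = 25 and 76*76 = 5776 end in the original
    # number, while 7*7 = 49 does not.
    assert is_automorphic_number(5)
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)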
| 93 |
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) Levenshtein edit distance between `word1` and `word2`."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
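    # Assumed example: transforming "intention" into "execution" takes five
    # single-character edits, the classic Levenshtein textbook case.
    print(min_distance_up_bottom("intention", "execution"))  # 5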
| 676 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Processor for SAM: wraps a SamImageProcessor and normalizes point/box prompts."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="pt" , ) -> int:
if input_points is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
lowerCamelCase : Tuple = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] ) for point in input_points
]
else:
lowerCamelCase : List[Any] = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ )
for point, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCamelCase , lowerCamelCase : Union[str, Any] = self._pad_points_and_labels(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = np.array(UpperCamelCase__ )
if input_labels is not None:
lowerCamelCase : Dict = np.array(UpperCamelCase__ )
if input_boxes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
lowerCamelCase : List[str] = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] , is_bounding_box=UpperCamelCase__ )
for box in input_boxes
]
else:
lowerCamelCase : int = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ , is_bounding_box=UpperCamelCase__ )
for box, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
lowerCamelCase : Optional[int] = np.array(UpperCamelCase__ )
if input_boxes is not None:
if return_tensors == "pt":
lowerCamelCase : Any = torch.from_numpy(UpperCamelCase__ )
# boxes batch size of 1 by default
lowerCamelCase : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCamelCase : Optional[int] = tf.convert_to_tensor(UpperCamelCase__ )
# boxes batch size of 1 by default
lowerCamelCase : Any = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCamelCase : Optional[Any] = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
lowerCamelCase : str = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCamelCase : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
lowerCamelCase : Dict = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCamelCase : Optional[Any] = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
lowerCamelCase : Union[str, Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCamelCase : Tuple = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
lowerCamelCase : Dict = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
lowerCamelCase : List[str] = max([point.shape[0] for point in input_points] )
lowerCamelCase : Any = []
for i, point in enumerate(UpperCamelCase__ ):
if point.shape[0] != expected_nb_points:
lowerCamelCase : int = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowerCamelCase : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCamelCase__ )
lowerCamelCase : Tuple = processed_input_points
return input_points, input_labels
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]:
lowerCamelCase , lowerCamelCase : List[Any] = original_size
lowerCamelCase , lowerCamelCase : Tuple = self.image_processor._get_preprocess_shape(UpperCamelCase__ , longest_edge=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = deepcopy(UpperCamelCase__ ).astype(UpperCamelCase__ )
if is_bounding_box:
lowerCamelCase : Tuple = coords.reshape(-1 , 2 , 2 )
lowerCamelCase : str = coords[..., 0] * (new_w / old_w)
lowerCamelCase : int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCamelCase : Union[str, Any] = coords.reshape(-1 , 4 )
return coords
def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> List[Any]:
if input_points is not None:
if hasattr(UpperCamelCase__ , "numpy" ): # Checks for TF or Torch tensor
lowerCamelCase : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_points[0] , UpperCamelCase__ ):
raise ValueError("Input points must be a list of list of floating points." )
lowerCamelCase : List[str] = [np.array(UpperCamelCase__ ) for input_point in input_points]
else:
lowerCamelCase : str = None
if input_labels is not None:
if hasattr(UpperCamelCase__ , "numpy" ):
lowerCamelCase : Union[str, Any] = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_labels[0] , UpperCamelCase__ ):
raise ValueError("Input labels must be a list of list integers." )
lowerCamelCase : List[Any] = [np.array(UpperCamelCase__ ) for label in input_labels]
else:
lowerCamelCase : Optional[int] = None
if input_boxes is not None:
if hasattr(UpperCamelCase__ , "numpy" ):
lowerCamelCase : Optional[int] = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase__ , UpperCamelCase__ )
or not isinstance(input_boxes[0] , UpperCamelCase__ )
or not isinstance(input_boxes[0][0] , UpperCamelCase__ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
lowerCamelCase : Union[str, Any] = [np.array(UpperCamelCase__ ).astype(np.floataa ) for box in input_boxes]
else:
lowerCamelCase : List[str] = None
return input_points, input_labels, input_boxes
@property
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(UpperCamelCase__ ) )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
return self.image_processor.post_process_masks(*UpperCamelCase__ , **UpperCamelCase__ )
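
# Usage sketch (added; hedged: assumes the `transformers` SAM classes are available
# and `raw_image` is a PIL image; names below are illustrative, not part of this file):
#
#   from transformers import SamImageProcessor
#   processor = SamProcessor(SamImageProcessor())
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   print(sorted(inputs.keys()))  # includes "pixel_values" and "input_points"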
| 311 |
"""Check whether an integer is an automorphic number (its square ends in itself)."""


def is_automorphic_number(number: int) -> bool:
    """Return True if number ** 2 ends with the digits of number itself (e.g. 5 -> 25)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
"""Unit tests for the basic linear algebra library in the relative `.lib` module."""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self):
        """test for the Vector.component() method"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for the Vector.__str__() method"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for the Vector.__len__() method"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for the Vector.euclidean_length() method"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for the vector addition operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for the vector subtraction operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for scalar multiplication and the dot product"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for the global zero_vector() function"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for the global unit_basis_vector() function"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for the global axpy() function (operation: z = a*x + y)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for the Vector.copy() method"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for the Vector.change_component() method"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for the Matrix.__str__() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for the Matrix.minor() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for the Matrix.cofactor() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for the Matrix.determinant() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        """test for matrix-vector and matrix-scalar multiplication"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for the Matrix.change_component() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for the Matrix.component() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self):
        """test for the matrix addition operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        """test for the matrix subtraction operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for the global square_zero_matrix() function"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
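
# Quick interactive sketch of the library under test (added; hedged: assumes the
# same relative `.lib` module is importable; values mirror the assertions above):
#
#   from .lib import Vector
#   v = Vector([1, 2])
#   v.euclidean_length()  # ~2.236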
| 370 |
"""Backtracking Sudoku solver for 9x9 grids."""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can go at (row, column) without repeating in the row, column or 3x3 box."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Fill ``grid`` in place by backtracking and return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
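
# Worked micro-example (added for illustration; values follow from ``initial_grid``):
#
#   find_empty_location(initial_grid)  # -> (0, 1), the first empty cell
#   is_safe(initial_grid, 0, 1, 1)     # -> True: no 1 in row 0, column 1, or its box
#   is_safe(initial_grid, 0, 1, 3)     # -> False: 3 already sits in row 0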
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
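
# Lazy-import sketch (added; illustrative): with the structure above, importing the
# package stays cheap, and heavy submodules load only on first attribute access:
#
#   from transformers.models.beit import BeitConfig  # config only, no torch import
#   from transformers.models.beit import BeitModel   # now modeling_beit gets loaded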
| 484 |
"""Informer model configuration."""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
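
# Usage sketch (added; hedged: assumes the surrounding `transformers` package is
# installed; values are illustrative):
#
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
#   print(config.d_model, config.attention_type)  # 64 prob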
| 676 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> bool:
lowercase__ : Optional[int] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
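    # Added illustrative checks: with integer rounding, large cubes stay exact,
    # where the naive float comparison `val ** 3 == n` would fail.
    print(perfect_cube(343))  # True
    print(perfect_cube(344))  # False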
| 560 |
"""Project Euler 50: find the prime below a ceiling that is the sum of the most
consecutive primes."""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is expressible as the longest sum
    of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
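    # Added sanity check: the sieve itself on a tiny range.
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]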
| 676 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
for row in grid:
for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 221 |
"""Unconditional image generation pipeline based on DDIM."""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
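
# Usage sketch (added; hedged: assumes `diffusers` weights such as
# "google/ddpm-cifar10-32" are reachable; illustrative only):
#
#   pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipeline(num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")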
| 676 | 0 |
"""Tests for the AltDiffusion image-to-image pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_img2img_fp16(self):
        """Test that the pipeline runs end-to-end in float16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
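
# Test-running note (added): the first class runs quickly on CPU with tiny dummy
# modules; the fp16 and integration paths are gated behind CUDA via skipIf and
# `require_torch_gpu`, so a plain pytest run on a CPU-only machine only exercises
# the fast path.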
| 284 |
"""Return the twin prime of a prime number (the prime exactly two greater), if any."""
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if both number and number + 2 are prime, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
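
# Added illustrative checks (hedged: they require the repository's maths.prime_check):
if __name__ == "__main__":
    print(twin_prime(3))  # 5, since 3 and 5 are both prime
    print(twin_prime(7))  # -1, since 9 is not prime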
| 676 | 0 |
"""Tests for the restricted Python interpreter used by transformers tools."""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Simple function the tests expose to the interpreter as a tool."""
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
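
# Behavior sketch (added): `evaluate` interprets a restricted Python AST against an
# explicit tool namespace and a mutable `state` dict, e.g.:
#
#   state = {"x": 3}
#   evaluate("y = add_two(x)", {"add_two": add_two}, state=state)  # -> 5
#   state                                                          # {"x": 3, "y": 5}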
| 614 |
"""Variance-exploding (VE) SDE scheduler for score-based generative models."""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
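
# Sampling sketch (added; hedged: mirrors diffusers' ScoreSdeVePipeline with a
# hypothetical score model `unet`; illustrative, not a verbatim pipeline):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(2000)
#   scheduler.set_sigmas(2000)
#   sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           model_output = unet(sample, sigma_t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = unet(sample, sigma_t).sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample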
| 676 | 0 |
"""Tests for the Flax GPT-J model."""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 126 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : int ) -> str:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(__snake_case , __snake_case ) or not number >= 1:
raise ValueError(
'''starting number must be
and integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
lowerCamelCase_ =''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__snake_case )
# print(out)
number += 1
out += " "
return out
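# Illustrative example (not in the original): with number=1 and iterations=15 the
# loop returns "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (each entry is followed by a single trailing space).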
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__magic_name__ = """<<<<<<< This should probably be modified because it mentions: """
__magic_name__ = """=======
>>>>>>>
"""
__magic_name__ = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__magic_name__ = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def _A ( __lowercase ):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Any ):
lowerCamelCase__ = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCamelCase__ = get_logger("""datasets-cli/converting""" )
lowerCamelCase__ = tfds_path
lowerCamelCase__ = datasets_directory
def __UpperCAmelCase ( self : Any ):
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowerCamelCase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
lowerCamelCase__ = os.path.abspath(self._datasets_directory )
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {}
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ = os.listdir(SCREAMING_SNAKE_CASE_ )
else:
lowerCamelCase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""" )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(SCREAMING_SNAKE_CASE_ , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = []
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = []
for line in lines:
lowerCamelCase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowerCamelCase__ = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowerCamelCase__ = """"""
continue
elif "from absl import logging" in out_line:
lowerCamelCase__ = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowerCamelCase__ = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowerCamelCase__ = True
lowerCamelCase__ = list(filter(lambda SCREAMING_SNAKE_CASE_ : e in out_line , SCREAMING_SNAKE_CASE_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(SCREAMING_SNAKE_CASE_ ) + """\n""" )
out_lines.append(SCREAMING_SNAKE_CASE_ )
out_lines.append(SCREAMING_SNAKE_CASE_ )
continue
else:
for pattern, replacement in TO_CONVERT:
lowerCamelCase__ = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowerCamelCase__ = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , SCREAMING_SNAKE_CASE_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowerCamelCase__ = """from . import """ + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowerCamelCase__ = True
out_lines.append(SCREAMING_SNAKE_CASE_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowerCamelCase__ = f_name.replace(""".py""" , """""" )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(SCREAMING_SNAKE_CASE_ )
if needs_manual_update:
with_manual_update.append(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
self._logger.info(f"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowerCamelCase__ = os.path.basename(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.""" )
| 129 |
'''simple docstring'''
from typing import List
import numpy as np
def a_ ( __snake_case : dict ) -> int:
"""simple docstring"""
lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
lowerCamelCase_ =max(lists_lengths.values() , default=0 )
return max(1 , __snake_case )
def a_ ( __snake_case : int , __snake_case : int ) -> List[range]:
"""simple docstring"""
lowerCamelCase_ =[]
for group_idx in range(__snake_case ):
lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowerCamelCase_ =range(__snake_case , start + num_shards_to_add )
shards_indices_per_group.append(__snake_case )
return shards_indices_per_group
def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]:
"""simple docstring"""
lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case )
if num_shards == 1:
return [dict(__snake_case )]
else:
lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__snake_case , __snake_case )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__snake_case ) )
]
def a_ ( __snake_case : List[dict] ) -> dict:
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , __snake_case )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict:
"""simple docstring"""
lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )}
lowerCamelCase_ ={}
for size in list_sizes:
lowerCamelCase_ =list(range(__snake_case ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowerCamelCase_ =dict(__snake_case )
for key, value in shuffled_kwargs.items():
if isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]]
return shuffled_kwargs
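# Illustrative example (not in the original): with gen_kwargs = {"files": [f"f{i}" for i in range(10)]}
# and max_num_jobs=3, the shard distribution above yields the index groups
# range(0, 4), range(4, 7) and range(7, 10), i.e. jobs of sizes 4, 3 and 3.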
| 676 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=2 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=36 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=512 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=6 ,__UpperCAmelCase=6 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,__UpperCAmelCase=1000 ,) -> Union[str, Any]:
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : List[Any] = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Any = text_seq_length
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : Optional[int] = use_input_mask
lowerCAmelCase__ : Dict = use_token_type_ids
lowerCAmelCase__ : Union[str, Any] = use_labels
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[int] = coordinate_size
lowerCAmelCase__ : Tuple = shape_size
lowerCAmelCase__ : Any = num_labels
lowerCAmelCase__ : Tuple = num_choices
lowerCAmelCase__ : Tuple = scope
lowerCAmelCase__ : Tuple = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCAmelCase__ : int = text_seq_length
lowerCAmelCase__ : str = (image_size // patch_size) ** 2 + 1
lowerCAmelCase__ : List[Any] = self.text_seq_length + self.image_seq_length
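# Worked example with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
# image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12.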
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase__ : Any = bbox[i, j, 3]
lowerCAmelCase__ : Any = bbox[i, j, 1]
lowerCAmelCase__ : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase__ : List[str] = bbox[i, j, 2]
lowerCAmelCase__ : List[str] = bbox[i, j, 0]
lowerCAmelCase__ : List[Any] = t
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = None
if self.use_input_mask:
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase__ : Dict = None
if self.use_token_type_ids:
lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : List[str] = None
if self.use_labels:
lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
lowerCAmelCase__ : int = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Optional[int] = LayoutLMvaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# text + image
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase ,pixel_values=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(
__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : str = model(__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ : Any = model(__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase__ : Tuple = model(pixel_values=__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : List[Any] = self.num_labels
lowerCAmelCase__ : str = LayoutLMvaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(
__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Optional[int] = self.num_labels
lowerCAmelCase__ : Dict = LayoutLMvaForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(
__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,labels=__UpperCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : int = LayoutLMvaForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(
__UpperCAmelCase ,bbox=__UpperCAmelCase ,pixel_values=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase ,start_positions=__UpperCAmelCase ,end_positions=__UpperCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
(
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
lowerCAmelCase__,
) = config_and_inputs  # config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
lowerCAmelCase__ : Any = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = False
__lowercase : Any = False
__lowercase : List[str] = False
__lowercase : Any = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase : Any = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]:
return True
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : str = LayoutLMvaModelTester(self )
lowerCAmelCase__ : List[str] = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=37 )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> int:
lowerCAmelCase__ : Optional[Any] = copy.deepcopy(__UpperCAmelCase )
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : Dict = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(__UpperCAmelCase ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCAmelCase )
elif model_class in get_values(__UpperCAmelCase ):
lowerCAmelCase__ : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCAmelCase )
elif model_class in [
*get_values(__UpperCAmelCase ),
]:
lowerCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCAmelCase )
elif model_class in [
*get_values(__UpperCAmelCase ),
]:
lowerCAmelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=__UpperCAmelCase ,)
return inputs_dict
def UpperCAmelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Dict = LayoutLMvaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ) -> Any:
return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Any = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Any = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values.to(__UpperCAmelCase )
lowerCAmelCase__ : int = torch.tensor([[1, 2]] )
lowerCAmelCase__ : Optional[int] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase__ : Optional[Any] = model(
input_ids=input_ids.to(__UpperCAmelCase ) ,bbox=bbox.to(__UpperCAmelCase ) ,pixel_values=pixel_values.to(__UpperCAmelCase ) ,)
# verify the logits
lowerCAmelCase__ : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
| 565 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : int = logging.getLogger(__name__)
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
lowerCamelCase_ =parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowerCamelCase_ =fp.readlines()
logger.info('''Start encoding''' )
logger.info(F'''{len(__snake_case )} examples to process.''' )
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =1_0000
lowerCamelCase_ =time.time()
for text in data:
lowerCamelCase_ =F'''{bos} {text.strip()} {sep}'''
lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
rslt.append(__snake_case )
iter += 1
if iter % interval == 0:
lowerCamelCase_ =time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
lowerCamelCase_ =time.time()
logger.info('''Finished binarization''' )
logger.info(F'''{len(__snake_case )} examples processed.''' )
lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
lowerCamelCase_ =tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt]
else:
lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(__snake_case , '''wb''' ) as handle:
pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
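# Example invocation (added; the script name and paths are hypothetical):
# python binarize_data.py --file_path data/dump.txt --tokenizer_type bert \
#   --tokenizer_name bert-base-uncased --dump_file data/binarized_text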
| 676 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = eval_examples
lowerCAmelCase__ :Dict = post_process_function
def snake_case ( self , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = "eval" , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = gen_kwargs.copy()
lowerCAmelCase__ :Union[str, Any] = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
lowerCAmelCase__ :List[str] = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
lowerCAmelCase__ :List[str] = gen_kwargs
lowerCAmelCase__ :Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase__ :Dict = self.get_eval_dataloader(__UpperCAmelCase )
lowerCAmelCase__ :str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
lowerCAmelCase__ :int = self.compute_metrics
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :str = time.time()
lowerCAmelCase__ :int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ :Union[str, Any] = eval_loop(
__UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , )
finally:
lowerCAmelCase__ :Optional[int] = compute_metrics
lowerCAmelCase__ :Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
lowerCAmelCase__ :str = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :str = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCAmelCase__ :int = metrics.pop(__UpperCAmelCase )
metrics.update(output.metrics )
else:
lowerCAmelCase__ :Tuple = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(__UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase__ :List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCAmelCase )
return metrics
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase = "test" , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = gen_kwargs.copy()
lowerCAmelCase__ :Dict = self.get_test_dataloader(__UpperCAmelCase )
# Temporarily disable metric computation; we will do it in the loop here.
lowerCAmelCase__ :Optional[Any] = self.compute_metrics
lowerCAmelCase__ :List[Any] = None
lowerCAmelCase__ :Optional[Any] = time.time()
lowerCAmelCase__ :List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ :Dict = eval_loop(
__UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , )
finally:
lowerCAmelCase__ :Tuple = compute_metrics
lowerCAmelCase__ :int = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase__ :List[Any] = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 'predict' )
lowerCAmelCase__ :int = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCAmelCase__ :str = metrics.pop(__UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCAmelCase )
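# Note (added, illustrative): evaluate() and predict() above share one pattern —
# temporarily unset compute_metrics so the evaluation loop only gathers raw
# predictions, run post_process_function on those outputs, compute the metrics
# once, and prefix every metric key with metric_key_prefix.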
| 93 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] ='mvp'
lowercase : List[str] =['past_key_values']
lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =classifier_dropout
lowerCamelCase_ =use_cache
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =use_prompt
lowerCamelCase_ =prompt_length
lowerCamelCase_ =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
lowerCamelCase_ =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
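# Note (added, illustrative): use_prompt=True selects MVP's prompt-based variant,
# with prompt_length and prompt_mid_dim sizing the prompt components, while
# scale_embedding multiplies embeddings by sqrt(d_model) when enabled.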
| 676 | 0 |
def A ( _SCREAMING_SNAKE_CASE ) -> bool:
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
lowerCamelCase : Optional[int] = 4
lowerCamelCase : Union[str, Any] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCamelCase : Optional[Any] = ((s * s) - 2) % m
return s == 0
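# Background (added): this is the Lucas-Lehmer primality test for Mersenne numbers.
# For m = 2**p - 1 it iterates s_0 = 4, s_{k+1} = (s_k**2 - 2) % m, and m is prime
# exactly when s_{p-2} == 0.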
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
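# Expected output (added): True for p=7 (2**7 - 1 = 127 is prime) and
# False for p=11 (2**11 - 1 = 2047 = 23 * 89).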
| 311 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {"""vocab_file""": """spiece.model"""}
a_ : Optional[int] = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[int] =[]
lowercase : str =['input_ids', 'attention_mask']
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
# Forward the special tokens and SentencePiece kwargs to the base class
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
return token
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
lowerCamelCase_ =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 676 | 0 |
'''simple docstring'''
import sys
__lowercase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def snake_case__ ( _A: str = N ) -> int:
'''simple docstring'''
lowerCAmelCase = -sys.maxsize - 1
for i in range(len(__snake_case ) - 12 ):
lowerCAmelCase = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowerCAmelCase = product
return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
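# For reference (added): with this 1000-digit series and a window of 13 digits,
# the largest product is 23514624000 (the published Project Euler #8 answer).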
| 370 |
'''simple docstring'''
from collections.abc import Sequence
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(__snake_case ) )
def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float:
"""simple docstring"""
lowerCamelCase_ =0.0
for coeff in reversed(__snake_case ):
lowerCamelCase_ =result * x + coeff
return result
if __name__ == "__main__":
a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
a_ : Tuple = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
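# Both calls print 79800.0 (added check): 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4
# = 500 + 9300 + 70000 = 79800.0.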
| 676 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: List[Any] ) -> int:
super().__init__()
_A = nn.Linear(3 , 4 )
_A = nn.BatchNormad(4 )
_A = nn.Linear(4 , 5 )
def __A ( self: Optional[Any] , __A: Optional[int] ) -> List[Any]:
return self.lineara(self.batchnorm(self.lineara(__A ) ) )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
"""simple docstring"""
def __A ( self: Union[str, Any] , __A: Optional[Any] , *__A: List[str] , **__A: int ) -> Dict:
return (args[0] + 1,) + args[1:], kwargs
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
"""simple docstring"""
def __A ( self: Tuple , __A: List[str] , __A: Optional[Any] ) -> List[Any]:
return output + 1
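# Note (added, illustrative): the two toy hooks above exercise the accelerate
# ModelHook contract — a pre-forward step may rewrite (args, kwargs) before the
# wrapped forward runs (here: add 1 to the first positional argument), and a
# post-forward step may rewrite the output (here: add 1 to it).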
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: str ) -> Optional[Any]:
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(__A , __A )
self.assertEqual(test_model._hf_hook , __A )
self.assertTrue(hasattr(__A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A , '''_hf_hook''' ) )
self.assertFalse(hasattr(__A , '''_old_forward''' ) )
def __A ( self: Dict ) -> Dict:
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(__A , __A )
add_hook_to_module(__A , __A , append=__A )
self.assertEqual(isinstance(test_model._hf_hook , __A ) , __A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A , '''_hf_hook''' ) )
self.assertFalse(hasattr(__A , '''_old_forward''' ) )
def __A ( self: Dict ) -> Tuple:
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(x + 1 )
_A = test_model(x + 2 )
_A = PreForwardHook()
add_hook_to_module(__A , __A )
_A = test_model(__A )
self.assertTrue(torch.allclose(__A , __A , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
_A = PreForwardHook()
add_hook_to_module(__A , __A )
_A = test_model(__A )
self.assertTrue(torch.allclose(__A , __A , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__A , __A )
_A = test_model(__A )
assert torch.allclose(__A , __A , atol=1e-5 )
def __A ( self: Optional[int] ) -> str:
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(__A )
_A = PostForwardHook()
add_hook_to_module(__A , __A )
_A = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
_A = PostForwardHook()
add_hook_to_module(__A , __A )
_A = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__A , __A )
_A = test_model(__A )
assert torch.allclose(__A , output + 2 , atol=1e-5 )
def __A ( self: str ) -> Any:
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(__A )
_A = PostForwardHook()
add_hook_to_module(__A , __A )
_A = test_model(__A )
self.assertTrue(torch.allclose(__A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_A = True
_A = test_model(__A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __A ( self: List[str] ) -> Optional[Any]:
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__A , AlignDevicesHook(io_same_device=__A ) )
_A = torch.randn(2 , 3 ).to(0 )
_A = model(__A )
self.assertEqual(output.device , torch.device(0 ) )
def __A ( self: Any ) -> Optional[int]:
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_A = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , __A )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_A = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def __A ( self: Optional[int] ) -> str:
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(__A , execution_device=__A , offload=__A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device , __A )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__A , execution_device=__A , offload=__A , offload_buffers=__A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def __A ( self: Optional[Any] ) -> str:
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
__A , execution_device=__A , offload=__A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device , __A )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__A , execution_device=__A , offload=__A , weights_map=model.state_dict() , offload_buffers=__A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_A = torch.randn(2 , 3 )
_A = model(__A )
self.assertEqual(output.device , __A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 484 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =['image_processor', 'tokenizer']
lowercase : str ='CLIPImageProcessor'
lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
if text is not None and images is not None:
lowerCamelCase_ =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
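# Usage sketch (added, illustrative): calling the processor with both text and
# images returns the tokenizer encoding with the image processor's pixel_values
# merged in, so a single BatchEncoding can be fed to the model.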
| 676 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def __UpperCAmelCase ( __lowerCamelCase ) -> bytes:
lowercase__ : str = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
lowercase__ : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(__snake_case ).content
if __name__ == "__main__":
lowerCAmelCase_ = input('Enter Video/IGTV url: ').strip()
lowerCAmelCase_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 560 |
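As renamed, the def above no longer matches the `download_video(url)` call in its `__main__` block, and the intermediate assignments lose the names the body still reads. A restored sketch of the same flow; the downloadgram endpoint is copied verbatim from the snippet, and whether it still responds is untested:

```python
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    # The endpoint returns JSON describing downloadable renditions; take the first source.
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_src = requests.get(base_url + url, timeout=30).json()[0]["urls"][0]["src"]
    return requests.get(video_src, timeout=30).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
```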
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
requires_backends(self, '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={}
if prompt is not None:
lowerCamelCase_ =prompt
if generate_kwargs is not None:
lowerCamelCase_ =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCamelCase_ ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
lowerCamelCase_ =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
'''Note also that one single text can be provided for conditional image to text generation.''' )
lowerCamelCase_ =self.model.config.model_type
if model_type == "git":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase_ =None
return model_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
lowerCamelCase_ =None
if generate_kwargs is None:
lowerCamelCase_ ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for output_ids in model_outputs:
lowerCamelCase_ ={
'''generated_text''': self.tokenizer.decode(
lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
}
records.append(lowerCAmelCase )
return records
| 676 | 0 |
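This is transformers' `image-to-text` pipeline with the GIT/Pix2Struct prompt handling shown above. A minimal invocation sketch; the checkpoint name is an illustration, not something the snippet specifies:

```python
from PIL import Image
from transformers import pipeline

# "microsoft/git-base" is illustrative; any vision-to-text checkpoint works.
captioner = pipeline("image-to-text", model="microsoft/git-base")
image = Image.new("RGB", (224, 224))

print(captioner(image))                       # unconditional captioning
print(captioner(image, prompt="a photo of"))  # GIT-style conditional generation
```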
def __lowerCAmelCase ( A_ : int ) -> str:
__UpperCAmelCase = [False] * len(__snake_case )
__UpperCAmelCase = [-1] * len(__snake_case )
def dfs(A_ : Union[str, Any] , A_ : Dict ):
__UpperCAmelCase = True
__UpperCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(__snake_case , 1 - c )
for i in range(len(__snake_case ) ):
if not visited[i]:
dfs(__snake_case , 0 )
for i in range(len(__snake_case ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
a_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 221 |
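The checker above two-colors the graph by DFS and rejects any edge whose endpoints share a color, i.e. any odd cycle; note the renamed def no longer matches the `check_bipartite_dfs(graph)` call below it. An iterative sketch of the same idea with readable names, plus a non-bipartite counterexample:

```python
def is_bipartite(graph: dict[int, list[int]]) -> bool:
    color: dict[int, int] = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        stack = [start]
        while stack:                         # iterative DFS, no recursion-limit issues
            v = stack.pop()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]  # neighbors take the opposite color
                    stack.append(u)
                elif color[u] == color[v]:   # same color across an edge: odd cycle
                    return False
    return True


assert is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []})
assert not is_bipartite({0: [1, 2], 1: [0, 2], 2: [0, 1]})  # a triangle is not bipartite
```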
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str:
"""simple docstring"""
# Initialise PyTorch model
lowerCamelCase_ =BertConfig.from_json_file(__snake_case )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ =BertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 676 | 0 |
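The same conversion can be driven programmatically with the three helpers the script imports; a hedged sketch with placeholder paths:

```python
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert

config = BertConfig.from_json_file("bert_config.json")  # placeholder path
model = BertForPreTraining(config)
load_tf_weights_in_bert(model, config, "model.ckpt")    # TF checkpoint prefix, placeholder
torch.save(model.state_dict(), "pytorch_model.bin")
```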
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCamelCase : List[Any] = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 |
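The `_LazyModule` registered above defers the heavy `modeling_trocr` import until a symbol is first accessed. A stripped-down sketch of that pattern, independent of transformers internals:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported symbols on first attribute access, not at import time."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, symbol: str):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        # Import the defining submodule lazily, relative to this package.
        submodule = importlib.import_module(
            f".{self._symbol_to_module[symbol]}", self.__name__
        )
        return getattr(submodule, symbol)
```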
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='altclip_text_model'
def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =project_dim
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='altclip_vision_model'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =num_channels
lowerCamelCase_ =patch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
@classmethod
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
lowerCamelCase_ =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='altclip'
lowercase : str =True
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )
super().__init__(**lowerCAmelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowerCamelCase_ ={}
# This is the complete result when using `text_config_dict`.
lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowerCamelCase_ =(
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase_ =(
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(lowerCAmelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowerCamelCase_ ={}
# This is the complete result when using `vision_config_dict`.
lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowerCamelCase_ ={
str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowerCamelCase_ =(
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase_ =(
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(lowerCAmelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowerCamelCase_ ={}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
lowerCamelCase_ ={}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )
lowerCamelCase_ =projection_dim
lowerCamelCase_ =logit_scale_init_value
lowerCamelCase_ =1.0
@classmethod
def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =copy.deepcopy(self.__dict__ )
lowerCamelCase_ =self.text_config.to_dict()
lowerCamelCase_ =self.vision_config.to_dict()
lowerCamelCase_ =self.__class__.model_type
return output
| 676 | 0 |
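Renamed back, these are transformers' `AltCLIPTextConfig`, `AltCLIPVisionConfig`, and the composite `AltCLIPConfig`. A sketch of composing them through the `from_text_vision_configs` classmethod defined near the end (argument values are illustrative):

```python
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(hidden_size=1024, num_hidden_layers=24)
vision_config = AltCLIPVisionConfig(hidden_size=768, image_size=224)

config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
assert config.projection_dim == 768  # the default seen in __init__ above
```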
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
UpperCAmelCase__ : Optional[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] ,dtype=tf.intaa ,) # "J'aime le camembert !" (I love camembert!)
UpperCAmelCase__ : Optional[int] = model(lowerCamelCase_ )['''last_hidden_state''']
UpperCAmelCase__ : Optional[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,lowerCamelCase_ )
# compare the actual values for a slice.
UpperCAmelCase__ : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 614 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( _lowerCAmelCase )-> list[int]:
__UpperCAmelCase = [True] * limit
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__UpperCAmelCase = i * 2
while index < limit:
__UpperCAmelCase = False
__UpperCAmelCase = index + i
__UpperCAmelCase = [2]
for i in range(3 , __snake_case , 2 ):
if is_prime[i]:
primes.append(__snake_case )
return primes
def _lowerCAmelCase ( _lowerCAmelCase = 1_00_00_00 )-> int:
__UpperCAmelCase = prime_sieve(__snake_case )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for i in range(len(__snake_case ) ):
for j in range(i + length , len(__snake_case ) ):
__UpperCAmelCase = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__UpperCAmelCase = j - i
__UpperCAmelCase = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 126 |
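This row is Project Euler 50: the longest run of consecutive primes summing to a prime below one million. As obfuscated, both defs share one name while their bodies still call `prime_sieve` and `solution`, so the snippet no longer runs; a cleaned-up, self-contained variant of the same approach, sped up with prefix sums:

```python
from itertools import accumulate


def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0:2] = [False, False]
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            is_prime[i * i :: i] = [False] * len(is_prime[i * i :: i])
    return [n for n, flag in enumerate(is_prime) if flag]


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    prime_set = set(primes)
    prefix = [0, *accumulate(primes)]  # prefix[j] == sum(primes[:j])
    best_length, best_prime = 0, 0
    for i in range(len(primes)):
        # Only consider windows strictly longer than the best found so far.
        for j in range(i + best_length + 1, len(prefix)):
            window_sum = prefix[j] - prefix[i]  # sum of primes[i:j] in O(1)
            if window_sum >= ceiling:
                break
            if window_sum in prime_set:
                best_length, best_prime = j - i, window_sum
    return best_prime


assert solution(100) == 41  # 2 + 3 + 5 + 7 + 11 + 13, six consecutive primes
print(solution())           # 997651 for the one-million bound
```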
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase_ =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
"""simple docstring"""
# convert pytorch tensor to numpy
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
lowerCamelCase_ ={}
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
import torch
# Load the index
lowerCamelCase_ ={}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase_ =torch.load(__snake_case )
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
lowerCamelCase_ =flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__snake_case , '''rb''' ) as state_f:
try:
lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
if any(__snake_case ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowerCamelCase_ =jax.tree_util.tree_map(
lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =pt_model.state_dict()
lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase_ =[]
lowerCamelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase_ ='''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase_ ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase_ =key.split('''.''' )
lowerCamelCase_ =None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase_ =key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase_ =key_components[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =key_components[:-3] + [name]
lowerCamelCase_ ='''.'''.join(__snake_case )
lowerCamelCase_ =key
if flax_key in special_pt_names:
lowerCamelCase_ =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
lowerCamelCase_ =torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
lowerCamelCase_ =list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__snake_case ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 676 | 0 |
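The trickiest rename in the converters above is the conv kernel: PyTorch stores `(out_ch, in_ch, kh, kw)` while Flax expects `(kh, kw, in_ch, out_ch)`, hence `transpose(2, 3, 1, 0)` on the way in and `(3, 2, 0, 1)` on the way back. A small numeric check that the two permutations are inverses:

```python
import numpy as np

pt_kernel = np.random.rand(8, 3, 5, 5)         # (out_ch, in_ch, kh, kw), PyTorch layout
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # -> (kh, kw, in_ch, out_ch), Flax layout
assert flax_kernel.shape == (5, 5, 3, 8)

roundtrip = flax_kernel.transpose(3, 2, 0, 1)  # the inverse used when loading Flax into PT
assert np.array_equal(roundtrip, pt_kernel)
```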
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
snake_case = ['input_features', 'attention_mask']
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Any=80 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_6000 , SCREAMING_SNAKE_CASE_ : str=80 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = num_mel_bins
lowerCamelCase__ = do_ceptral_normalize
lowerCamelCase__ = normalize_means
lowerCamelCase__ = normalize_vars
lowerCamelCase__ = True
def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
lowerCamelCase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
lowerCamelCase__ = ta_kaldi.fbank(SCREAMING_SNAKE_CASE_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] = True , SCREAMING_SNAKE_CASE_ : Any = True , SCREAMING_SNAKE_CASE_ : Any = 0.0 , ):
if normalize_means:
lowerCamelCase__ = x[:input_length].mean(axis=0 )
lowerCamelCase__ = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if normalize_vars:
lowerCamelCase__ = x[:input_length].std(axis=0 )
lowerCamelCase__ = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if input_length < x.shape[0]:
lowerCamelCase__ = padding_value
# make sure array is in float32
lowerCamelCase__ = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any = None ):
lowerCamelCase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Dict = False , SCREAMING_SNAKE_CASE_ : Union[str, Any] = None , SCREAMING_SNAKE_CASE_ : Optional[Any] = None , SCREAMING_SNAKE_CASE_ : Any = None , SCREAMING_SNAKE_CASE_ : List[Any] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase__ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase__ = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
lowerCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ = [raw_speech]
# extract fbank features
lowerCamelCase__ = [self._extract_fbank_features(SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase__ = BatchFeature({"""input_features""": features} )
lowerCamelCase__ = self.pad(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# make sure list is in array format
lowerCamelCase__ = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features]
lowerCamelCase__ = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCamelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCamelCase__ = (
np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase__ = self.normalize(
padded_inputs["""input_features"""] , attention_mask=SCREAMING_SNAKE_CASE_ )
if return_tensors is not None:
lowerCamelCase__ = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
| 129 |
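The `utterance_cmvn` staticmethod above is per-utterance cepstral mean and variance normalization. A standalone sketch of the same computation (with `normalize_means` and `normalize_vars` both enabled) on a dummy feature matrix:

```python
import numpy as np


def utterance_cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    # Statistics come only from the real (unpadded) frames; padded frames are
    # reset to padding_value afterwards, mirroring the method above.
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    x = np.divide(np.subtract(x, mean), std)
    x[input_length:] = padding_value
    return x.astype(np.float32)


features = np.random.rand(100, 80)         # 100 frames x 80 mel bins, last 10 are padding
normalized = utterance_cmvn(features, input_length=90)
assert abs(normalized[:90].mean()) < 1e-5  # zero mean over the real frames
```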
'''simple docstring'''
def a_ ( __snake_case : str , __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =len(__snake_case )
lowerCamelCase_ =(
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCamelCase_ =[]
for char_count in range(__snake_case ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__snake_case )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 676 | 0 |
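The final row interleaves two strings character by character, so `alternative_string_arrange("AB", "XYZ")` yields `"AXBYZ"`. The same result falls out of `itertools.zip_longest`:

```python
from itertools import zip_longest


def alternative_string_arrange(first: str, second: str) -> str:
    # Pair characters positionally, padding the shorter string with '',
    # then flatten: 'AB', 'XYZ' -> A X B Y Z.
    return "".join(a + b for a, b in zip_longest(first, second, fillvalue=""))


assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
```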